1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrResourceCache.h"
9 #include <atomic>
10 #include <vector>
11 #include "include/gpu/GrDirectContext.h"
12 #include "include/private/GrSingleOwner.h"
13 #include "include/private/SkTo.h"
14 #include "include/utils/SkRandom.h"
15 #include "src/core/SkMessageBus.h"
16 #include "src/core/SkOpts.h"
17 #include "src/core/SkScopeExit.h"
18 #include "src/core/SkTSort.h"
19 #include "src/gpu/GrCaps.h"
20 #include "src/gpu/GrDirectContextPriv.h"
21 #include "src/gpu/GrGpuResourceCacheAccess.h"
22 #include "src/gpu/GrProxyProvider.h"
23 #include "src/gpu/GrTexture.h"
24 #include "src/gpu/GrTextureProxyCacheAccess.h"
25 #include "src/gpu/GrThreadSafeCache.h"
26 #include "src/gpu/GrTracing.h"
27 #include "src/gpu/SkGr.h"
28
// Register the message types polled by this cache's inboxes. Unique-key invalidations
// are addressed by the context family id; freed-texture messages by the owning
// GrDirectContext's id (see the inbox constructions in GrResourceCache's ctor).
DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

// Asserts single-threaded access to the cache via the owner's GrSingleOwner.
#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)
34
35 //////////////////////////////////////////////////////////////////////////////
36
GenerateResourceType()37 GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
38 static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
39
40 int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
41 if (type > SkTo<int32_t>(UINT16_MAX)) {
42 SK_ABORT("Too many Resource Types");
43 }
44
45 return static_cast<ResourceType>(type);
46 }
47
GenerateDomain()48 GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
49 static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
50
51 int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
52 if (domain > SkTo<int32_t>(UINT16_MAX)) {
53 SK_ABORT("Too many GrUniqueKey Domains");
54 }
55
56 return static_cast<Domain>(domain);
57 }
58
// Hashes a resource key's raw words via SkOpts::hash.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
62
63 //////////////////////////////////////////////////////////////////////////////
64
// Scope guard that calls the cache's validate() on entry and again on exit, so the
// cache's invariants are checked around a whole operation.
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};
72
73 //////////////////////////////////////////////////////////////////////////////
74
75 inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
76
// Wraps 'texture' with one pending unref (the caller has already ref'ed it; see
// insertDelayedTextureUnref).
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}
79
// Move: steals the texture pointer and pending-unref count, leaving 'that' empty so its
// destructor performs no unrefs.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}
84
// Move-assign: steals 'that's state. NOTE(review): any texture this object currently
// holds is overwritten without performing its pending unrefs; callers appear to only
// move into empty slots (hash-map insertion/rehash), but confirm if usage changes.
inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}
91
// Performs any outstanding unrefs at destruction (fTexture is null after a move, in
// which case nothing is done).
inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}
99
addRef()100 inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }
101
// Performs one of the pending unrefs immediately.
inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}
107
finished()108 inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
109
110 //////////////////////////////////////////////////////////////////////////////
111
// 'familyID' addresses the invalidated-unique-key inbox; 'owningContextID' addresses
// the freed-texture inbox. Both ids must be valid.
GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}
123
// Releases every remaining resource before the cache goes away.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
127
// Sets the cache's byte budget and immediately purges down to the new limit.
void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}
132
// Adds a newly created resource to the cache (always as non-purgeable) and updates the
// byte/count bookkeeping; finishes with a purge in case this pushed us over budget.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());
    // Defensive release-build guard mirroring the asserts above (OHOS addition).
    if (!resource || this->isInCache(resource) || resource->wasDestroyed() || resource->resourcePriv().isPurgeable()) {
        SkDebugf("OHOS GrResourceCache::insertResource resource is invalid!!!");
        return;
    }
    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    // High-water marks are stats-only bookkeeping.
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        // Only budgeted resources count against the byte budget.
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}
169
// Removes a resource from whichever structure currently holds it (purgeable queue or
// non-purgeable array) and reverses the accounting done at insertion.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable() && this->isInPurgeableCache(resource)) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else if (this->isInNonpurgeableCache(resource)) {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    // Drop any key-based lookup entries that point at this resource.
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
200
// Abandons every resource in the cache (abandon() rather than release() — presumably
// the backend context is already gone; confirm against GrGpuResource::abandon) and
// asserts the cache ends up completely empty.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    // abandon() removes each resource from the cache, so these loops terminate when the
    // respective structures drain.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
232
// Releases (frees the backing GPU objects of) every resource in the cache and asserts
// the cache ends up completely empty. Called from the destructor.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    // release() removes each resource from the cache, so these loops drain the lists.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
273
// OHOS extension: releases every resource (purgeable or not) whose tag matches 'tag'.
// Matching unique keys are first invalidated through the proxy provider so proxies drop
// their references; the actual release happens in a separate pass so the two source
// lists are not mutated while being iterated.
void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
    AutoValidate av(this);
    this->processFreedGpuResources();
    SkASSERT(fProxyProvider); // better have called setProxyProvider
    std::vector<GrGpuResource*> recycleVector;
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    // Second pass: actually release the collected resources.
    for (auto resource : recycleVector) {
        SkASSERT(!resource->wasDestroyed());
        resource->cacheAccess().release();
    }
}
306
// Pushes 'tag' as the current resource tag. NOTE(review): passing an *invalid* tag does
// not push — it pops the current tag instead; callers appear to use an invalid tag as
// an "end scope" signal. Confirm before changing this behavior.
void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (tag.isGrTagValid()) {
        grResourceTagCacheStack.push(tag);
        return;
    }
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}
316
popGrResourceTag()317 void GrResourceCache::popGrResourceTag()
318 {
319 if (!grResourceTagCacheStack.empty()) {
320 grResourceTagCacheStack.pop();
321 }
322 }
323
getCurrentGrResourceTag() const324 GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
325 if (grResourceTagCacheStack.empty()) {
326 return{};
327 }
328 return grResourceTagCacheStack.top();
329 }
330
getAllGrGpuResourceTags() const331 std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
332 std::set<GrGpuResourceTag> result;
333 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
334 auto tag = fNonpurgeableResources[i]->getResourceTag();
335 result.insert(tag);
336 }
337 return result;
338 }
339
// Adds a ref to 'resource'. When the resource has no outstanding refs yet, route
// through refAndMakeResourceMRU so the purgeable/non-purgeable bookkeeping and the
// LRU timestamp are updated as well.
void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}
350
// Predicate passed to the scratch map's find(): accepts every candidate, because
// anything present in the scratch map is, by construction, usable as scratch.
class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};
361
// Looks up a scratch resource for 'scratchKey'. On a hit the resource is removed from
// the scratch map (it is no longer available as scratch once handed out), ref'ed, and
// made most-recently-used. Returns nullptr on a miss, or — defensively (OHOS) — if the
// found resource is somehow no longer tracked by the cache.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        if (!this->isInCache(resource)) {
            SkDebugf("OHOS GrResourceCache::findAndRefScratchResource not in cache, return!!!");
            return nullptr;
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
377
// Notification that 'resource' is about to lose its scratch key: drop its scratch-map
// entry (if it currently has one) before the key goes away.
void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}
385
// Strips 'resource' of its unique key (if any) and, now key-less, re-registers it in
// the scratch map when it qualifies as scratch.
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
405
// Assigns 'newKey' to 'resource', first evicting or de-keying any other resource that
// currently owns that key. An invalid 'newKey' simply removes the resource's key.
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
447
// Refs 'resource' and stamps it most-recently-used. A currently-purgeable resource is
// first migrated out of the purgeable queue into the non-purgeable array.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        if (this->isInPurgeableCache(resource)) {
            fPurgeableBytes -= resource->gpuMemorySize();
            fPurgeableQueue.remove(resource);
        }
        if (!this->isInNonpurgeableCache(resource)) {
            this->addToNonpurgeableArray(resource);
        } else {
            SkDebugf("OHOS resource in isInNonpurgeableCache, do not add again!");
        }
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // It was counted as "a flush would make this purgeable"; the new ref voids that.
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
474
// Called when 'resource' loses its last ref of kind 'removedRef'. Decides whether the
// resource becomes purgeable (moving it to the purgeable queue), stays cached for
// reuse via its scratch/unique key, or is released immediately.
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
    // Defensive release-build guard mirroring the asserts above (OHOS addition).
    if (!resource || resource->wasDestroyed() || this->isInPurgeableCache(resource) ||
        !this->isInNonpurgeableCache(resource)) {
        SkDebugf("OHOS GrResourceCache::notifyARefCntReachedZero return!");
        return;
    }
    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        // With no main ref left, the resource may now be handed out as scratch.
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // Not purgeable yet (e.g. pending command-buffer work), but a flush would do it.
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    // Newly purgeable: migrate from the non-purgeable array to the purgeable queue.
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
560
// Updates the cache's accounting after 'resource' switched between budgeted and
// unbudgeted states; scratch-map membership follows the budgeted state.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // Became budgeted: count it against the budget.
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        // Became unbudgeted: reverse the accounting and drop scratch availability.
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
606
purgeAsNeeded()607 void GrResourceCache::purgeAsNeeded() {
608 SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
609 fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
610 if (invalidKeyMsgs.count()) {
611 SkASSERT(fProxyProvider);
612
613 for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
614 if (invalidKeyMsgs[i].inThreadSafeCache()) {
615 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
616 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
617 } else {
618 fProxyProvider->processInvalidUniqueKey(
619 invalidKeyMsgs[i].key(), nullptr,
620 GrProxyProvider::InvalidateGPUResource::kYes);
621 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
622 }
623 }
624 }
625
626 this->processFreedGpuResources();
627
628 bool stillOverbudget = this->overBudget();
629 while (stillOverbudget && fPurgeableQueue.count()) {
630 GrGpuResource* resource = fPurgeableQueue.peek();
631 if (!resource->resourcePriv().isPurgeable()) {
632 SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable");
633 continue;
634 }
635 SkASSERT(resource->resourcePriv().isPurgeable());
636 resource->cacheAccess().release();
637 stillOverbudget = this->overBudget();
638 }
639
640 if (stillOverbudget) {
641 fThreadSafeCache->dropUniqueRefs(this);
642
643 stillOverbudget = this->overBudget();
644 while (stillOverbudget && fPurgeableQueue.count()) {
645 GrGpuResource* resource = fPurgeableQueue.peek();
646 if (!resource->resourcePriv().isPurgeable()) {
647 SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable after dropUniqueRefs");
648 continue;
649 }
650 SkASSERT(resource->resourcePriv().isPurgeable());
651 resource->cacheAccess().release();
652 stillOverbudget = this->overBudget();
653 }
654 }
655
656 this->validate();
657 }
658
// Purges purgeable resources that became purgeable before '*purgeTime' (all of them if
// purgeTime is null). When scratchResourcesOnly is true, only resources without a valid
// unique key are purged.
void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
                                             bool scratchResourcesOnly) {

    if (!scratchResourcesOnly) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}
725
// OHOS extension: drops thread-safe-cache unique refs, then purges every purgeable
// resource that lacks a unique key (i.e. scratch resources), regardless of age.
void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
    fThreadSafeCache->dropUniqueRefs(nullptr);
    // Sort the queue
    fPurgeableQueue.sort();

    // Make a list of the scratch resources to delete
    SkTDArray<GrGpuResource*> scratchResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (!resource) {
            continue;
        }
        SkASSERT(resource->resourcePriv().isPurgeable());
        if (!resource->getUniqueKey().isValid()) {
            *scratchResources.append() = resource;
        }
    }

    // Delete the scratch resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue
    for (int i = 0; i <scratchResources.count(); i++) {
        scratchResources.getAt(i)->cacheAccess().release();
    }

    this->validate();
}
752
// OHOS extension: purges purgeable resources whose tag matches 'tag'. When
// scratchResourcesOnly is true, only resources without a valid unique key are purged.
void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
    // Sort the queue
    fPurgeableQueue.sort();

    // Make a list of the matching resources to delete
    SkTDArray<GrGpuResource*> scratchResources;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        SkASSERT(resource->resourcePriv().isPurgeable());
        if (tag.filter(resource->getResourceTag()) && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
            *scratchResources.append() = resource;
        }
    }

    // Delete the collected resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue
    for (int i = 0; i <scratchResources.count(); i++) {
        scratchResources.getAt(i)->cacheAccess().release();
    }

    this->validate();
}
775
// Tries to free enough budgeted, purgeable resources (in LRU order) that
// 'desiredHeadroomBytes' would fit under the budget. Returns true on success (including
// when the headroom already exists); false when it can't be achieved.
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    // Dry run: walk the LRU-sorted queue counting how many releases are needed.
    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}
814
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)815 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
816
817 const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
818 bool stillOverbudget = tmpByteBudget < fBytes;
819
820 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
821 // Sort the queue
822 fPurgeableQueue.sort();
823
824 // Make a list of the scratch resources to delete
825 SkTDArray<GrGpuResource*> scratchResources;
826 size_t scratchByteCount = 0;
827 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
828 GrGpuResource* resource = fPurgeableQueue.at(i);
829 SkASSERT(resource->resourcePriv().isPurgeable());
830 if (!resource->getUniqueKey().isValid()) {
831 *scratchResources.append() = resource;
832 scratchByteCount += resource->gpuMemorySize();
833 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
834 }
835 }
836
837 // Delete the scratch resources. This must be done as a separate pass
838 // to avoid messing up the sorted order of the queue
839 for (int i = 0; i < scratchResources.count(); i++) {
840 scratchResources.getAt(i)->cacheAccess().release();
841 }
842 stillOverbudget = tmpByteBudget < fBytes;
843
844 this->validate();
845 }
846
847 // Purge any remaining resources in LRU order
848 if (stillOverbudget) {
849 const size_t cachedByteCount = fMaxBytes;
850 fMaxBytes = tmpByteBudget;
851 this->purgeAsNeeded();
852 fMaxBytes = cachedByteCount;
853 }
854 }
855
requestsFlush() const856 bool GrResourceCache::requestsFlush() const {
857 return this->overBudget() && !fPurgeableQueue.count() &&
858 fNumBudgetedResourcesFlushWillMakePurgeable > 0;
859 }
860
insertDelayedTextureUnref(GrTexture * texture)861 void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
862 texture->ref();
863 uint32_t id = texture->uniqueID().asUInt();
864 if (auto* data = fTexturesAwaitingUnref.find(id)) {
865 data->addRef();
866 } else {
867 fTexturesAwaitingUnref.set(id, {texture});
868 }
869 }
870
processFreedGpuResources()871 void GrResourceCache::processFreedGpuResources() {
872 if (!fTexturesAwaitingUnref.count()) {
873 return;
874 }
875
876 SkTArray<GrTextureFreedMessage> msgs;
877 fFreedTextureInbox.poll(&msgs);
878 for (int i = 0; i < msgs.count(); ++i) {
879 SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
880 uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
881 TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
882 // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
883 // empty and we would have returned early above. Thus, any texture from a message should be
884 // in the list of fTexturesAwaitingUnref.
885 SkASSERT(info);
886 info->unref();
887 if (info->finished()) {
888 fTexturesAwaitingUnref.remove(id);
889 }
890 }
891 }
892
addToNonpurgeableArray(GrGpuResource * resource)893 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
894 int index = fNonpurgeableResources.count();
895 *fNonpurgeableResources.append() = resource;
896 *resource->cacheAccess().accessCacheIndex() = index;
897 }
898
removeFromNonpurgeableArray(GrGpuResource * resource)899 void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
900 int* index = resource->cacheAccess().accessCacheIndex();
901 // Fill the hole we will create in the array with the tail object, adjust its index, and
902 // then pop the array
903 GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
904 SkASSERT(fNonpurgeableResources[*index] == resource);
905 fNonpurgeableResources[*index] = tail;
906 *tail->cacheAccess().accessCacheIndex() = *index;
907 fNonpurgeableResources.pop();
908 SkDEBUGCODE(*index = -1);
909 }
910
// Returns a monotonically increasing timestamp. When the 32-bit counter wraps back to 0,
// every existing resource's timestamp is renormalized to the compact range [0, count) so
// relative ordering is preserved across the wrap.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Drain the priority queue; popping yields the purgeable resources in queue
            // order (presumably ordered by timestamp via its comparator — declared elsewhere).
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            // Sort the nonpurgeable array by timestamp as well so both sequences can be merged.
            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                // Timestamps are unique across both collections, so a tie is a bug.
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
972
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump) const973 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
974 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
975 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
976 }
977 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
978 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
979 }
980 }
981
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump,const GrGpuResourceTag & tag) const982 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
983 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
984 if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
985 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
986 }
987 }
988 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
989 if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
990 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
991 }
992 }
993 }
994
995 #if GR_CACHE_STATS
getStats(Stats * stats) const996 void GrResourceCache::getStats(Stats* stats) const {
997 stats->reset();
998
999 stats->fTotal = this->getResourceCount();
1000 stats->fNumNonPurgeable = fNonpurgeableResources.count();
1001 stats->fNumPurgeable = fPurgeableQueue.count();
1002
1003 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1004 stats->update(fNonpurgeableResources[i]);
1005 }
1006 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1007 stats->update(fPurgeableQueue.at(i));
1008 }
1009 }
1010
1011 #if GR_TEST_UTILS
dumpStats(SkString * out) const1012 void GrResourceCache::dumpStats(SkString* out) const {
1013 this->validate();
1014
1015 Stats stats;
1016
1017 this->getStats(&stats);
1018
1019 float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
1020
1021 out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
1022 out->appendf("\t\tEntry Count: current %d"
1023 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
1024 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
1025 stats.fScratch, fHighWaterCount);
1026 out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
1027 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
1028 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
1029 }
1030
dumpStatsKeyValuePairs(SkTArray<SkString> * keys,SkTArray<double> * values) const1031 void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
1032 SkTArray<double>* values) const {
1033 this->validate();
1034
1035 Stats stats;
1036 this->getStats(&stats);
1037
1038 keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
1039 }
1040 #endif // GR_TEST_UTILS
1041 #endif // GR_CACHE_STATS
1042
1043 #ifdef SK_DEBUG
// Debug-only consistency check: recomputes the cache's aggregate bookkeeping (byte totals,
// budgeted counts, purgeable bytes, flush-will-make-purgeable count) from scratch and asserts
// it matches the incrementally maintained members. Also checks per-resource invariants.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    // mask is 0 for small caches (always validate); for larger caches validation runs on a
    // random ~1/(mask+1) fraction of calls.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Local accumulator recomputing the cache's stats independently of the members.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            // Zero all counters, then re-point the map/hash references (the memset
            // clears them too, so set them afterwards).
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        // Fold one resource into the tallies and assert its individual invariants.
        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                // Usable-as-scratch implies: no unique key, budgeted, unreffed, and
                // present in the scratch map.
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                // Has a scratch key but is not currently usable as scratch; it must not
                // be in the scratch map.
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                // Uniquely keyed resources must be findable via the unique hash.
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Every entry in the scratch map must actually be usable as scratch, and the
        // map's count must be consistent with its contents.
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    // Walk the nonpurgeable array: each entry must be at its recorded index, alive, and
    // (except for a resource newly transitioning) not purgeable.
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    // Walk the purgeable queue: each entry must be purgeable, at its recorded index, and alive.
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // The recomputed tallies must match the incrementally maintained members.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
1164 #endif // SK_DEBUG
1165
isInCache(const GrGpuResource * resource) const1166 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
1167 int index = *resource->cacheAccess().accessCacheIndex();
1168 if (index < 0) {
1169 return false;
1170 }
1171 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1172 return true;
1173 }
1174 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1175 return true;
1176 }
1177 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
1178 return false;
1179 }
1180
isInPurgeableCache(const GrGpuResource * resource) const1181 bool GrResourceCache::isInPurgeableCache(const GrGpuResource* resource) const {
1182 int index = *resource->cacheAccess().accessCacheIndex();
1183 if (index < 0) {
1184 return false;
1185 }
1186 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1187 return true;
1188 }
1189 SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1190 return false;
1191 }
1192
isInNonpurgeableCache(const GrGpuResource * resource) const1193 bool GrResourceCache::isInNonpurgeableCache(const GrGpuResource* resource) const {
1194 int index = *resource->cacheAccess().accessCacheIndex();
1195 if (index < 0) {
1196 return false;
1197 }
1198 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1199 return true;
1200 }
1201 SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1202 return false;
1203 }
1204
1205 #if GR_TEST_UTILS
1206
countUniqueKeysWithTag(const char * tag) const1207 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
1208 int count = 0;
1209 fUniqueHash.foreach([&](const GrGpuResource& resource){
1210 if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
1211 ++count;
1212 }
1213 });
1214 return count;
1215 }
1216
// Test-only hook: forces the cache's timestamp counter to a specific value (e.g. to
// exercise the wrap-around renormalization in getNextTimestamp()).
void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}
1220
1221 #endif // GR_TEST_UTILS
1222