/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include <vector>
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

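// Hands out the next scratch resource type ID. IDs are allocated process-wide from an atomic
// counter and must fit in 16 bits; running out is a fatal error.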
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}

inline void GrResourceCache::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

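// Adds a resource that was just created or wrapped to the cache's tracking structures, updates
// the byte/count bookkeeping, then purges if the cache is now over budget.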
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}

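// Removes a resource from all cache bookkeeping (purgeable queue or nonpurgeable array, scratch
// map, and unique-key hash) without releasing the resource itself.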
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

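// Abandons every resource in the cache: backend GPU objects are not deleted (the context is
// assumed to be lost), but all CPU-side tracking is dropped.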
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

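// Releases every resource in the cache, freeing the backend GPU objects. Unique keys are removed
// from proxies first because they hold raw pointers back into this cache.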
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};

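// Finds a cached resource matching 'scratchKey', removes it from the scratch map, refs it, and
// makes it most recently used. Returns nullptr if no match is available.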
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

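// Installs 'newKey' on 'resource', first evicting or stripping any other resource that currently
// holds that key. An invalid 'newKey' just removes the resource's existing unique key.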
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

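// Called when a resource loses its last main ref or command buffer usage. Decides whether the
// resource goes back into the scratch map, moves to the purgeable queue, stays cached for reuse,
// or is released immediately.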
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

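// Updates budgeted byte/count totals, the flush-will-make-purgeable count, and the scratch map
// when a resource transitions between budgeted and unbudgeted states.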
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

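// Drains pending invalidated-key and freed-texture messages, then releases purgeable resources
// in LRU order (and thread-safe-cache refs if necessary) until the cache is back under budget.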
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            if (invalidKeyMsgs[i].inThreadSafeCache()) {
                fThreadSafeCache->remove(invalidKeyMsgs[i].key());
                SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
            } else {
                fProxyProvider->processInvalidUniqueKey(
                        invalidKeyMsgs[i].key(), nullptr,
                        GrProxyProvider::InvalidateGPUResource::kYes);
                SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
            }
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    if (stillOverbudget) {
        fThreadSafeCache->dropUniqueRefs(this);

        while (stillOverbudget && fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
            stillOverbudget = this->overBudget();
        }
    }

    this->validate();
}

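// Purges purgeable resources, optionally restricted to scratch-only resources (those without a
// unique key) and/or to resources that became purgeable before 'purgeTime'.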
void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
                                             bool scratchResourcesOnly) {

    if (!scratchResourcesOnly) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}

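// Tries to free enough budgeted, purgeable resources (oldest first) so that
// 'desiredHeadroomBytes' would fit under the budget. Returns true on success.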
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}

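// Purges roughly 'bytesToPurge' worth of purgeable resources, preferring scratch resources when
// requested; any remainder is purged by temporarily lowering the budget and calling
// purgeAsNeeded().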
void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

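// True when the cache is over budget, nothing is purgeable right now, and flushing pending work
// would make at least one budgeted resource purgeable.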
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
    texture->ref();
    uint32_t id = texture->uniqueID().asUInt();
    if (auto* data = fTexturesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fTexturesAwaitingUnref.set(id, {texture});
    }
}

void GrResourceCache::processFreedGpuResources() {
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

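// Returns the next LRU timestamp. When the counter is at zero (initially or after wrapping), all
// existing resources are re-sorted and reassigned compact sequential timestamps starting at zero.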
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if GR_TEST_UTILS
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif

#endif

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

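// Debug-only check that the resource's recorded cache index points at it in either the purgeable
// queue or the nonpurgeable array.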
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif