/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/GrResourceCache.h"
#include <atomic>
#include <vector>
#include "include/gpu/GrDirectContext.h"
#include "include/private/base/SingleOwner.h"
#include "include/private/base/SkTo.h"
#include "src/base/SkRandom.h"
#include "src/base/SkScopeExit.h"
#include "src/base/SkTSort.h"
#include "src/core/SkMessageBus.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrTexture.h"
#include "src/gpu/ganesh/GrTextureProxyCacheAccess.h"
#include "src/gpu/ganesh/GrThreadSafeCache.h"
#include "src/gpu/ganesh/GrTracing.h"
#include "src/gpu/ganesh/SkGr.h"

using namespace skia_private;

DECLARE_SKMESSAGEBUS_MESSAGE(skgpu::UniqueKeyInvalidatedMessage, uint32_t, true)

DECLARE_SKMESSAGEBUS_MESSAGE(GrResourceCache::UnrefResourceMessage,
                             GrDirectContext::DirectContextID,
                             /*AllowCopyableMessage=*/false)

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(skgpu::SingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fUnrefResourceInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

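// Adds a newly created, non-purgeable resource to the cache: it receives the next LRU timestamp,
// lands in the nonpurgeable array, and the total/budgeted byte counts are updated before a purge
// pass enforces the budget.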
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
    this->purgeAsNeeded();
}

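// Removes a resource from the cache entirely, pulling it out of whichever container currently
// holds it (purgeable queue or nonpurgeable array) and backing its size out of the byte, budget,
// scratch-map, and unique-key bookkeeping.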
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

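// Called when the owning context is abandoned: every remaining resource is abandoned (rather than
// released through the normal path) and the thread-safe cache drops its refs, after which all
// counts must be zero.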
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    while (!fNonpurgeableResources.empty()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

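// Releases every resource in the cache, freeing the underlying GPU objects. This runs from the
// destructor, so the proxies must drop their unique keys first since they hold raw pointers back
// to this cache.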
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (!fNonpurgeableResources.empty()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
}

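// Refs 'resource'. If it is currently unreffed it is also made most-recently-used (and pulled out
// of the purgeable queue if necessary); otherwise this is a plain ref.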
void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

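// Finds a resource matching 'scratchKey', removes it from the scratch map (it is not available as
// scratch while reffed), and returns it reffed and moved to the MRU position. Returns nullptr if
// there is no match.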
GrGpuResource* GrResourceCache::findAndRefScratchResource(const skgpu::ScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey);
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

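// Called just before a resource loses its scratch key so it can be removed from the scratch map
// while the key is still valid.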
void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

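// Strips 'resource' of its unique key (if it has one) and re-registers it in the scratch map when
// losing the key makes it usable as scratch again.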
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

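// Installs 'newKey' on 'resource'. Any other resource currently holding that key is either
// released (if it is purgeable and unreachable) or has the key stripped. Passing an invalid key
// simply removes 'resource's existing unique key.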
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

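// Refs 'resource' and makes it the most recently used entry. A purgeable resource is pulled out of
// the purgeable queue and back into the nonpurgeable array; an unreffed budgeted resource stops
// counting toward "a flush would make this purgeable".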
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

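// Called by a resource when the last ref (or command buffer usage) of a given type goes away.
// Decides what happens next: the resource may be re-registered as scratch, moved to the purgeable
// queue, promoted to budgeted so it can be reused, or released immediately.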
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

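// Called when a resource transitions between budgeted and unbudgeted. Updates the budgeted counts,
// the "flush will make purgeable" count, and scratch-map membership, then purges if the resource
// just started counting against the budget.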
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

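// The main budget-enforcement pass: drains the unique-key-invalidation and unref message inboxes,
// then releases purgeable resources in LRU order. If that is not enough, the thread-safe cache is
// asked to drop its unique refs and the purge loop runs once more.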
void GrResourceCache::purgeAsNeeded() {
    TArray<skgpu::UniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (!invalidKeyMsgs.empty()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.size(); ++i) {
            if (invalidKeyMsgs[i].inThreadSafeCache()) {
                fThreadSafeCache->remove(invalidKeyMsgs[i].key());
                SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
            } else {
                fProxyProvider->processInvalidUniqueKey(
                        invalidKeyMsgs[i].key(), nullptr,
                        GrProxyProvider::InvalidateGPUResource::kYes);
                SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
            }
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    if (stillOverbudget) {
        fThreadSafeCache->dropUniqueRefs(this);

        stillOverbudget = this->overBudget();
        while (stillOverbudget && fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
            stillOverbudget = this->overBudget();
        }
    }

    this->validate();
}

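// Purges purgeable resources that became purgeable before 'purgeTime' (or all of them when no time
// is given). With kScratchResourcesOnly, only resources without a unique key are released;
// otherwise everything eligible goes, including the thread-safe cache's unique refs.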
void GrResourceCache::purgeUnlockedResources(const skgpu::StdSteadyClock::time_point* purgeTime,
                                             GrPurgeResourceOptions opts) {
    if (opts == GrPurgeResourceOptions::kAllResources) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();

            const skgpu::StdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        SkASSERT(opts == GrPurgeResourceOptions::kScratchResourcesOnly);
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);

            const skgpu::StdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.size(); i++) {
            scratchResources[i]->cacheAccess().release();
        }
    }

    this->validate();
}

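// Attempts to make 'desiredHeadroomBytes' of budget headroom by releasing purgeable resources in
// LRU order. Returns false (releasing nothing) if the target cannot be met; otherwise releases the
// chosen resources and returns true.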
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}

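// Purges roughly 'bytesToPurge' worth of purgeable resources. When 'preferScratchResources' is
// true, scratch (non-unique-keyed) resources are released first in LRU order; any remaining
// deficit is then purged in plain LRU order by temporarily lowering the byte budget.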
void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.size(); i++) {
            scratchResources[i]->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

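// Reports whether the cache wants the context flushed: it is over budget, nothing is currently
// purgeable, and at least one budgeted resource would become purgeable after a flush.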
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

void GrResourceCache::processFreedGpuResources() {
    TArray<UnrefResourceMessage> msgs;
    fUnrefResourceInbox.poll(&msgs);
    // We don't need to do anything other than let the messages delete themselves and call unref.
}

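// Appends 'resource' to the nonpurgeable array and stores its index on the resource so it can be
// removed in O(1) later.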
void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    SkDEBUGCODE(*index = -1);
}

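// Hands out the next LRU timestamp. When the 32-bit counter wraps, all live resources are
// re-assigned compact sequential timestamps (preserving their relative order) and the counter
// continues from just past the highest value handed out.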
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

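// Forwards every resource in the cache (nonpurgeable and purgeable) to the trace memory dump.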
void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.size();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if defined(GR_TEST_UTILS)
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(TArray<SkString>* keys,
                                             TArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif // defined(GR_TEST_UTILS)
#endif // GR_CACHE_STATS

#ifdef SK_DEBUG
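// Debug-only consistency check: recomputes the cache's statistics from scratch by walking both
// containers and asserts they match the incrementally maintained counters and key maps. Runs
// probabilistically so large caches aren't validated on every call.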
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const skgpu::ScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const skgpu::UniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

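// Debug-only membership test: uses the index stored on the resource to check whether it is present
// in either the purgeable queue or the nonpurgeable array.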
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if defined(GR_TEST_UTILS)

int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
    int count = 0;
    fUniqueHash.foreach([&](const GrGpuResource& resource){
        if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
            ++count;
        }
    });
    return count;
}

void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}

void GrResourceCache::visitSurfaces(
        const std::function<void(const GrSurface*, bool purgeable)>& func) const {

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const GrSurface* surf = fNonpurgeableResources[i]->asSurface()) {
            func(surf, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const GrSurface* surf = fPurgeableQueue.at(i)->asSurface()) {
            func(surf, /* purgeable= */ true);
        }
    }
}

#endif // defined(GR_TEST_UTILS)