/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include "include/gpu/GrContext.h"
#include "include/gpu/GrTexture.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkExchange.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);

DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

//////////////////////////////////////////////////////////////////////////////

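// Scratch resource types and unique key domains are 16-bit namespaces handed out below from
// monotonically increasing atomic counters; exhausting either space is a hard abort.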
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType++;
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain++;
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

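// Bookkeeping for resources handed out for asynchronous use: the cache holds one or more extra
// refs until a matching GrGpuResourceFreedMessage is processed, at which point the pending unrefs
// are applied (or applied in bulk at destruction if a message never arrives).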
inline GrResourceCache::ResourceAwaitingUnref::ResourceAwaitingUnref() = default;

inline GrResourceCache::ResourceAwaitingUnref::ResourceAwaitingUnref(GrGpuResource* resource)
        : fResource(resource), fNumUnrefs(1) {}

inline GrResourceCache::ResourceAwaitingUnref::ResourceAwaitingUnref(ResourceAwaitingUnref&& that) {
    fResource = skstd::exchange(that.fResource, nullptr);
    fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::ResourceAwaitingUnref& GrResourceCache::ResourceAwaitingUnref::operator=(
        ResourceAwaitingUnref&& that) {
    fResource = skstd::exchange(that.fResource, nullptr);
    fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::ResourceAwaitingUnref::~ResourceAwaitingUnref() {
    if (fResource) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fResource->unref();
        }
    }
}

inline void GrResourceCache::ResourceAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::ResourceAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fResource->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::ResourceAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
                                 uint32_t contextUniqueID)
        : fInvalidUniqueKeyInbox(contextUniqueID)
        , fFreedGpuResourceInbox(contextUniqueID)
        , fContextUniqueID(contextUniqueID)
        , fSingleOwner(singleOwner)
        , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimits(int count, size_t bytes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

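// Registers a newly created, non-purgeable resource with the cache's bookkeeping. Budgeted
// resources are counted against the budget and scratch-only resources are added to the scratch
// map; the cache may purge other resources immediately afterwards to stay within its limits.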
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fResourcesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fResourcesAwaitingUnref.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fResourcesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fResourcesAwaitingUnref.count());
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }

    bool operator()(const GrGpuResource* resource) const {
        SkASSERT(!resource->getUniqueKey().isValid() &&
                 resource->resourcePriv().getScratchKey().isValid());
        if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
            return false;
        }
        return !fRejectPendingIO || !resource->internalHasPendingIO();
    }

private:
    bool fRejectPendingIO;
};

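// Finds and refs a scratch resource matching 'scratchKey'. When no-pending-IO behavior is
// requested, a first lookup rejects candidates with pending IO; depending on the flags and the
// remaining budget, the function may return nullptr so the caller allocates a fresh resource
// instead of reusing one that would require a flush.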
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          ScratchFlags flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (ScratchFlags::kPreferNoPendingIO | ScratchFlags::kRequireNoPendingIO)) {
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & ScratchFlags::kRequireNoPendingIO) {
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing
        // immediately, but on ANGLE this can lead to starving the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPreferNoPendingIO was specified and we didn't find a resource without pending IO,
            // but there is still space in our budget for the resource, so force the caller to
            // allocate a new resource.
            return nullptr;
        }
    }
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (!resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

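// Installs 'newKey' on 'resource'. Any other resource currently holding the key is either
// released (if it is purgeable and otherwise unreachable) or stripped of the key; an invalid
// 'newKey' simply removes the resource's existing unique key.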
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRef() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

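// Called when a resource's ref count and/or pending IO count reaches zero. Updates the
// flush-will-make-purgeable bookkeeping and, once all counts hit zero and the resource is
// purgeable, either keeps it in the purgeable queue (keyed or budget-fitting resources) or
// releases it immediately.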
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
        // the purgeable queue happens just below in this function. So we mark it as an exception.
        if (resource->resourcePriv().isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
        if (!resource->resourcePriv().isPurgeable() &&
            resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
            SkASSERT(resource->resourcePriv().hasPendingIO_debugOnly());
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
    } else {
        // If this is budgeted and just became purgeable by dropping the last pending IO
        // then it clearly no longer needs a flush to become purgeable.
        if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            resource->resourcePriv().isPurgeable()) {
            SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
            fNumBudgetedResourcesFlushWillMakePurgeable--;
        }
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        SkASSERT(!resource->resourcePriv().isPurgeable());
        return;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

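// Drains the invalidated-unique-key and freed-resource message inboxes, then releases purgeable
// resources in LRU order until the cache is back within budget or nothing purgeable remains.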
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            fProxyProvider->processInvalidUniqueKey(invalidKeyMsgs[i].key(), nullptr,
                                                    GrProxyProvider::InvalidateGPUResource::kYes);
            SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();
}

void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
    if (!scratchResourcesOnly) {
        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}

void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were given both LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= this one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
    }
}

void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
    const size_t tmpByteBudget = SkTMax((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}
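
// Reports whether a flush is needed to free memory: the cache is over budget, nothing is currently
// purgeable, but some budgeted resources would become purgeable once their pending work is flushed.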
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}

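// Takes an extra ref on 'resource' now and records it as awaiting an unref; the ref is dropped
// later when the corresponding GrGpuResourceFreedMessage is processed (or when the cache is torn
// down without ever receiving one).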
void GrResourceCache::insertDelayedResourceUnref(GrGpuResource* resource) {
    resource->ref();
    uint32_t id = resource->uniqueID().asUInt();
    if (auto* data = fResourcesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fResourcesAwaitingUnref.set(id, {resource});
    }
}

void GrResourceCache::processFreedGpuResources() {
    SkTArray<GrGpuResourceFreedMessage> msgs;
    fFreedGpuResourceInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fOwningUniqueID == fContextUniqueID);
        uint32_t id = msgs[i].fResource->uniqueID().asUInt();
        ResourceAwaitingUnref* info = fResourcesAwaitingUnref.find(id);
        // If we called release or abandon on the GrContext we will have already released our ref
        // on the GrGpuResource. If the message then arrives before the GrContext is actually
        // destroyed, we would otherwise try to process it again during GrContext destruction.
        // Checking for an entry here protects us from unreffing the resource twice.
        if (info) {
            info->unref();
            if (info->finished()) {
                fResourcesAwaitingUnref.remove(id);
            }
        }
    }
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

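// Returns the next LRU timestamp. When the 32-bit counter wraps back to 0, every resource already
// in the cache is re-timestamped in its current relative order so that LRU ordering is preserved.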
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be
            // extremely rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if GR_TEST_UTILS
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float countUtilization = (100.f * fBudgetedCount) / fMaxCount;
    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d items %d bytes\n", fMaxCount, (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch %.2g%% full), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, countUtilization, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries"));
    values->push_back(stats.fNumPurgeable);
}
#endif

#endif

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());

                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        ScratchMap::ConstIter iter(&fScratchMap);

        int count = 0;
        for ( ; !iter.done(); ++iter) {
            const GrGpuResource* resource = *iter;
            SkASSERT(resource->resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource->getUniqueKey().isValid());
            count++;
        }
        SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            SkASSERT(fNonpurgeableResources[i]->resourcePriv().hasPendingIO_debugOnly());
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

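// The cached index may refer to either the nonpurgeable array or the purgeable queue, so check
// both before concluding the resource is not tracked by this cache.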
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif