/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrResourceCache.h"
#include <atomic>
#include "GrCaps.h"
#include "GrGpuResourceCacheAccess.h"
#include "GrProxyProvider.h"
#include "GrSingleOwner.h"
#include "GrTexture.h"
#include "GrTextureProxyCacheAccess.h"
#include "GrTracing.h"
#include "SkGr.h"
#include "SkMessageBus.h"
#include "SkOpts.h"
#include "SkRandom.h"
#include "SkScopeExit.h"
#include "SkTSort.h"
#include "SkTo.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);

DECLARE_SKMESSAGEBUS_MESSAGE(GrGpuResourceFreedMessage);

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

//////////////////////////////////////////////////////////////////////////////

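// Scratch resource types and unique key domains are handed out from process-wide atomic
// counters; both identifier spaces must fit in 16 bits.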
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType++;
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain++;
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
                                 uint32_t contextUniqueID)
        : fProxyProvider(nullptr)
        , fTimestamp(0)
        , fMaxCount(kDefaultMaxCount)
        , fMaxBytes(kDefaultMaxSize)
#if GR_CACHE_STATS
        , fHighWaterCount(0)
        , fHighWaterBytes(0)
        , fBudgetedHighWaterCount(0)
        , fBudgetedHighWaterBytes(0)
#endif
        , fBytes(0)
        , fBudgetedCount(0)
        , fBudgetedBytes(0)
        , fPurgeableBytes(0)
        , fInvalidUniqueKeyInbox(contextUniqueID)
        , fFreedGpuResourceInbox(contextUniqueID)
        , fContextUniqueID(contextUniqueID)
        , fSingleOwner(singleOwner)
        , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
    SkDEBUGCODE(fCount = 0;)
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr;)
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimits(int count, size_t bytes) {
    fMaxCount = count;
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

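// Registers a newly created resource with the cache. The resource must be non-purgeable (its
// creator still holds a ref). It is timestamped, added to the nonpurgeable array, counted
// against the byte/count totals (and the budget if it is budgeted), indexed by its scratch key
// when it has one and no unique key, and finally the cache is trimmed back to budget.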
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = SkTMax(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = SkTMax(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}

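// Removes a resource from all cache bookkeeping: the purgeable queue or nonpurgeable array,
// the byte/count totals, and the scratch and unique key tables. The resource itself is not
// freed here.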
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

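// Called when the owning context is abandoned: every resource is abandoned (dropped from the
// cache's bookkeeping without its backing GPU object being deleted through the 3D API), in
// contrast to releaseAll() below, which actually releases the backing objects.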
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    for (int i = 0; i < fResourcesWaitingForFreeMsg.count(); ++i) {
        fResourcesWaitingForFreeMsg[i]->cacheAccess().abandon();
    }
    fResourcesWaitingForFreeMsg.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fResourcesWaitingForFreeMsg.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    for (int i = 0; i < fResourcesWaitingForFreeMsg.count(); ++i) {
        fResourcesWaitingForFreeMsg[i]->unref();
    }
    fResourcesWaitingForFreeMsg.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fResourcesWaitingForFreeMsg.count());
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse(bool rejectPendingIO) : fRejectPendingIO(rejectPendingIO) { }

    bool operator()(const GrGpuResource* resource) const {
        SkASSERT(!resource->getUniqueKey().isValid() &&
                 resource->resourcePriv().getScratchKey().isValid());
        if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
            return false;
        }
        return !fRejectPendingIO || !resource->internalHasPendingIO();
    }

private:
    bool fRejectPendingIO;
};

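// Looks for a cached resource matching the scratch key. With kPreferNoPendingIO or
// kRequireNoPendingIO the search is first restricted to resources without pending IO; if that
// fails, kRequireNoPendingIO returns null, and kPreferNoPendingIO may return null to force a
// fresh allocation while the resource still fits in the budget. Otherwise the search is
// repeated accepting pending IO. A returned resource is ref'ed and made most-recently-used.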
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey,
                                                          size_t resourceSize,
                                                          ScratchFlags flags) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource;
    if (flags & (ScratchFlags::kPreferNoPendingIO | ScratchFlags::kRequireNoPendingIO)) {
        resource = fScratchMap.find(scratchKey, AvailableForScratchUse(true));
        if (resource) {
            this->refAndMakeResourceMRU(resource);
            this->validate();
            return resource;
        } else if (flags & ScratchFlags::kRequireNoPendingIO) {
            return nullptr;
        }
        // We would prefer to consume more available VRAM rather than flushing
        // immediately, but on ANGLE this can lead to starving of the GPU.
        if (fPreferVRAMUseOverFlushes && this->wouldFit(resourceSize)) {
            // kPrefer is specified, we didn't find a resource without pending io,
            // but there is still space in our budget for the resource so force
            // the caller to allocate a new resource.
            return nullptr;
        }
    }
    resource = fScratchMap.find(scratchKey, AvailableForScratchUse(false));
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (!resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

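// Notification from a resource that one of its reference counts has hit zero. If all counts
// (refs and pending reads/writes) are now zero the resource becomes purgeable: it moves from
// the nonpurgeable array to the purgeable queue, and is then either kept for reuse (budgeted
// and keyed, or unbudgeted-cacheable with a unique key), re-budgeted as scratch if there is
// room, or released immediately.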
void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t flags) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(flags);
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (SkToBool(ResourceAccess::kRefCntReachedZero_RefNotificationFlag & flags)) {
#ifdef SK_DEBUG
        // When the timestamp overflows validate() is called. validate() checks that resources in
        // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
        // the purgeable queue happens just below in this function. So we mark it as an exception.
        if (resource->resourcePriv().isPurgeable()) {
            fNewlyPurgeableResourceForValidation = resource;
        }
#endif
        resource->cacheAccess().setTimestamp(this->getNextTimestamp());
        SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
    }

    if (!SkToBool(ResourceAccess::kAllCntsReachedZero_RefNotificationFlag & flags)) {
        SkASSERT(!resource->resourcePriv().isPurgeable());
        return;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget.
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (fBudgetedCount < fMaxCount &&
                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make the
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = SkTMax(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = SkTMax(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

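// Brings the cache back under budget: first drains the invalidated-unique-key and freed-resource
// message inboxes, then releases purgeable resources in LRU order until the count and byte
// budgets are satisfied or nothing purgeable remains.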
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            fProxyProvider->processInvalidUniqueKey(invalidKeyMsgs[i].key(), nullptr,
                                                    GrProxyProvider::InvalidateGPUResource::kYes);
            SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();
}

void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
    if (!scratchResourcesOnly) {
        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}

void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were both given LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= to this
            // one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
    }
}

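// Attempts to free roughly bytesToPurge worth of purgeable resources. When
// preferScratchResources is set, unique-keyed resources are spared in a first pass over the
// LRU-sorted queue; if that pass does not free enough, the byte budget is temporarily lowered
// and purgeAsNeeded() purges the remainder in LRU order.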
void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {

    const size_t tmpByteBudget = SkTMax((size_t)0, fBytes - bytesToPurge);
    bool stillOverbudget = tmpByteBudget < fBytes;

    if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        size_t scratchByteCount = 0;
        for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
                scratchByteCount += resource->gpuMemorySize();
                stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
        stillOverbudget = tmpByteBudget < fBytes;

        this->validate();
    }

    // Purge any remaining resources in LRU order
    if (stillOverbudget) {
        const size_t cachedByteCount = fMaxBytes;
        fMaxBytes = tmpByteBudget;
        this->purgeAsNeeded();
        fMaxBytes = cachedByteCount;
    }
}

void GrResourceCache::insertCrossContextGpuResource(GrGpuResource* resource) {
    resource->ref();
    SkASSERT(!fResourcesWaitingForFreeMsg.contains(resource));
    fResourcesWaitingForFreeMsg.push_back(resource);
}

void GrResourceCache::processFreedGpuResources() {
    SkTArray<GrGpuResourceFreedMessage> msgs;
    fFreedGpuResourceInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fOwningUniqueID == fContextUniqueID);
        int index = fResourcesWaitingForFreeMsg.find(msgs[i].fResource);
        // If we called release or abandon on the GrContext we will have already released our ref
        // on the GrGpuResource. If the message then arrives before the GrContext is actually
        // destroyed, we would otherwise try to process it again when the GrContext is destroyed.
        // Checking the waiting list protects us from unreffing the resource twice.
        if (index != -1) {
            fResourcesWaitingForFreeMsg.removeShuffle(index);
            msgs[i].fResource->unref();
        }
    }
}

void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}

void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    SkDEBUGCODE(*index = -1);
}

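// Returns the next LRU timestamp. When the 32-bit counter wraps to 0, every resource currently
// in the cache is re-stamped with compacted, sequential timestamps (in their existing LRU
// order) so that relative recency is preserved.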
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());

                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

787 
788     {
789         ScratchMap::ConstIter iter(&fScratchMap);
790 
791         int count = 0;
792         for ( ; !iter.done(); ++iter) {
793             const GrGpuResource* resource = *iter;
794             SkASSERT(resource->resourcePriv().getScratchKey().isValid());
795             SkASSERT(!resource->getUniqueKey().isValid());
796             count++;
797         }
798         SkASSERT(count == fScratchMap.count()); // ensure the iterator is working correctly
799     }
800 
801     Stats stats(this);
802     size_t purgeableBytes = 0;
803 
804     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
805         SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
806                  fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
807         SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
808         SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
809         stats.update(fNonpurgeableResources[i]);
810     }
811     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
812         SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
813         SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
814         SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
815         stats.update(fPurgeableQueue.at(i));
816         purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
817     }
818 
819     SkASSERT(fCount == this->getResourceCount());
820     SkASSERT(fBudgetedCount <= fCount);
821     SkASSERT(fBudgetedBytes <= fBytes);
822     SkASSERT(stats.fBytes == fBytes);
823     SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
824     SkASSERT(stats.fBudgetedCount == fBudgetedCount);
825     SkASSERT(purgeableBytes == fPurgeableBytes);
826 #if GR_CACHE_STATS
827     SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
828     SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
829     SkASSERT(fBytes <= fHighWaterBytes);
830     SkASSERT(fCount <= fHighWaterCount);
831     SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
832     SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
833 #endif
834     SkASSERT(stats.fContent == fUniqueHash.count());
835     SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());
836 
837     // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
838     // calls. This will be fixed when subresource registration is explicit.
839     // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
840     // SkASSERT(!overBudget || locked == count || fPurging);
841 }
842 
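// Debug-only check that the resource's stored cache index refers back to it in either the
// purgeable queue or the nonpurgeable array.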
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif