/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/ResourceCache.h"

#include "include/private/base/SingleOwner.h"
#include "src/base/SkRandom.h"
#include "src/core/SkTMultiMap.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/Resource.h"

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

sk_sp<ResourceCache> ResourceCache::Make(SingleOwner* singleOwner) {
    return sk_sp<ResourceCache>(new ResourceCache(singleOwner));
}

ResourceCache::ResourceCache(SingleOwner* singleOwner) : fSingleOwner(singleOwner) {
    // TODO: Maybe once things start using ResourceCache then, like in Ganesh, the compiler won't
    // complain about fSingleOwner being unused in Release builds and we can delete this.
#ifndef SK_DEBUG
    (void)fSingleOwner;
#endif
}

ResourceCache::~ResourceCache() {
    // The ResourceCache must have been shut down by the ResourceProvider before it is destroyed.
    SkASSERT(fIsShutdown);
}

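// Tears down the cache. Setting fIsShutdown under fReturnMutex ensures that returnResource()
// rejects any later returns, so after draining the return queue we can safely drop the cache
// refs on everything left in the non-purgeable array and the purgeable queue.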
void ResourceCache::shutdown() {
    ASSERT_SINGLE_OWNER

    SkASSERT(!fIsShutdown);

    {
        SkAutoMutexExclusive locked(fReturnMutex);
        fIsShutdown = true;
    }
    this->processReturnedResources();

    while (fNonpurgeableResources.size()) {
        Resource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        this->removeFromNonpurgeableArray(back);
        back->unrefCache();
    }

    while (fPurgeableQueue.count()) {
        Resource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        this->removeFromPurgeableQueue(top);
        top->unrefCache();
    }
}

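// Adds a newly created (non-purgeable) resource to the cache. Only shareable resources are put
// into fResourceMap right away; non-shareable (scratch) resources are added to the map later,
// when their last usage ref comes back through returnResourceToCache().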
void ResourceCache::insertResource(Resource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());
    SkASSERT(resource->key().isValid());
    // All resources in the cache are owned. If we track wrapped resources in the cache we'll need
    // to update this check.
    SkASSERT(resource->ownership() == Ownership::kOwned);

    this->processReturnedResources();

    resource->registerWithCache(sk_ref_sp(this));
    resource->refCache();

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    SkDEBUGCODE(fCount++;)

    if (resource->key().shareable() == Shareable::kYes) {
        fResourceMap.insert(resource->key(), resource);
    }
    // TODO: If the resource is budgeted update our memory usage. Then purge resources if adding
    // this one put us over budget (when we actually have a budget).
}

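// Looks up a cached resource matching 'key' and refs it for the caller. A non-shareable match is
// removed from fResourceMap so it cannot be handed out twice; a shareable match stays in the map
// and can be returned to any number of requests.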
Resource* ResourceCache::findAndRefResource(const GraphiteResourceKey& key,
                                            skgpu::Budgeted budgeted) {
    ASSERT_SINGLE_OWNER

    this->processReturnedResources();

    SkASSERT(key.isValid());

    Resource* resource = fResourceMap.find(key);
    if (resource) {
        // All resources we pull out of the cache for use should be budgeted
        SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
        if (key.shareable() == Shareable::kNo) {
            // If a resource is not shareable (i.e. scratch resource) then we remove it from the map
            // so that it isn't found again.
            fResourceMap.remove(key, resource);
            if (budgeted == skgpu::Budgeted::kNo) {
                // TODO: Once we track our budget we also need to decrease our usage here since the
                // resource no longer counts against the budget.
                resource->makeUnbudgeted();
            }
            SkDEBUGCODE(resource->fNonShareableInCache = false;)
        } else {
            // Shareable resources should never be requested as non budgeted
            SkASSERT(budgeted == skgpu::Budgeted::kYes);
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

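// Marks the resource as most recently used: takes a new usage ref, moves it out of the purgeable
// queue if necessary, and stamps it with a fresh timestamp.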
void ResourceCache::refAndMakeResourceMRU(Resource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (this->inPurgeableQueue(resource)) {
        // It's about to become unpurgeable.
        this->removeFromPurgeableQueue(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->initialUsageRef();

    resource->setTimestamp(this->getNextTimestamp());
    this->validate();
}

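// Called when a Resource drops its last usage or command buffer ref, potentially from a thread
// other than the cache's. The resource is queued on fReturnQueue for later processing on the
// cache's thread. Returns false if the cache has already been shut down and will not take the
// resource back.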
bool ResourceCache::returnResource(Resource* resource, LastRemovedRef removedRef) {
    // We should never be trying to return a LastRemovedRef of kCache.
    SkASSERT(removedRef != LastRemovedRef::kCache);
    SkAutoMutexExclusive locked(fReturnMutex);
    if (fIsShutdown) {
        return false;
    }

    SkASSERT(resource);

    // We only allow one instance of a Resource to be in the return queue at a time. We do this so
    // that the ReturnQueue stays small and quick to process.
    //
    // Because we take CacheRefs to all Resources added to the ReturnQueue, we would be safe if we
    // decided to have multiple instances of a Resource. Even if an earlier returned instance of a
    // Resource triggers that Resource to get purged from the cache, the Resource itself wouldn't
    // get deleted until we drop all the CacheRefs in this ReturnQueue.
    if (*resource->accessReturnIndex() >= 0) {
        // If the resource is already in the return queue we promote the LastRemovedRef to be
        // kUsage if that is what is returned here.
        if (removedRef == LastRemovedRef::kUsage) {
            SkASSERT(*resource->accessReturnIndex() < (int)fReturnQueue.size());
            fReturnQueue[*resource->accessReturnIndex()].second = removedRef;
        }
        return true;
    }
#ifdef SK_DEBUG
    for (auto& nextResource : fReturnQueue) {
        SkASSERT(nextResource.first != resource);
    }
#endif
    fReturnQueue.push_back(std::make_pair(resource, removedRef));
    *resource->accessReturnIndex() = fReturnQueue.size() - 1;
    resource->refCache();
    return true;
}

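// Drains fReturnQueue and moves each returned resource back into the cache proper, dropping the
// cache ref that the queue held. Resources that were already purged (cache index of -1) only get
// their queue ref dropped.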
void ResourceCache::processReturnedResources() {
    // We need to move the returned Resources off of the ReturnQueue before we start processing them
    // so that we can drop the fReturnMutex. When we process a Resource we may need to grab its
    // UnrefMutex. This could cause a deadlock if on another thread the Resource has the UnrefMutex
    // and is waiting on the ReturnMutex to be free.
    ReturnQueue tempQueue;
    {
        SkAutoMutexExclusive locked(fReturnMutex);
        // TODO: Instead of doing a copy of the vector, we may be able to improve the performance
        // here by storing some form of linked list, then just move the pointer to the first
        // element and reset the ReturnQueue's top element to nullptr.
        tempQueue = fReturnQueue;
        fReturnQueue.clear();
        for (auto& nextResource : tempQueue) {
            auto [resource, ref] = nextResource;
            SkASSERT(*resource->accessReturnIndex() >= 0);
            *resource->accessReturnIndex() = -1;
        }
    }
    for (auto& nextResource : tempQueue) {
        auto [resource, ref] = nextResource;
        // We need this check here to handle the following scenario. A Resource is sitting in the
        // ReturnQueue (say from a kUsage last ref) and the Resource still has a command buffer ref
        // out in the wild. When the ResourceCache calls processReturnedResources it locks the
        // ReturnMutex. Immediately after this, the command buffer ref is released on another
        // thread. The Resource cannot be added to the ReturnQueue since the lock is held. Back in
        // the ResourceCache, we drop the ReturnMutex, and when we try to return the Resource we
        // will see that it is purgeable. If we are over budget it is possible that the Resource
        // gets purged from the ResourceCache at this time, setting its cache index to -1. The
        // unrefCache call will actually block here on the Resource's UnrefMutex which is held from
        // the command buffer ref. Eventually the command buffer ref thread will get to run again
        // and with the ReturnMutex lock dropped it will get added to the ReturnQueue. At this
        // point the first unrefCache call will continue on the main ResourceCache thread. When we
        // call processReturnedResources the next time, we don't want this Resource added back into
        // the cache, thus we have the check here. The Resource will then get deleted when we call
        // unrefCache below to remove the cache ref added from the ReturnQueue.
        if (*resource->accessCacheIndex() != -1) {
            this->returnResourceToCache(resource, ref);
        }
        // Remove the cache ref held by the ReturnQueue.
        resource->unrefCache();
    }
}

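// Handles a single resource coming back from the ReturnQueue: non-shareable resources returning
// their last usage ref are re-registered in fResourceMap (and re-budgeted if needed), and any
// resource with no outstanding refs is moved from the non-purgeable array to the purgeable queue.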
void ResourceCache::returnResourceToCache(Resource* resource, LastRemovedRef removedRef) {
    // A resource should not have been destroyed when placed into the return queue. Also, before
    // purging any resources from the cache itself, we always empty the return queue first. When the
    // cache releases/abandons all of its resources, it first invalidates the return queue so no new
    // resources can be added. Thus we should not end up in a situation where a resource gets
    // destroyed after it was added to the return queue.
    SkASSERT(!resource->wasDestroyed());

    SkASSERT(this->isInCache(resource));
    if (removedRef == LastRemovedRef::kUsage) {
        if (resource->key().shareable() == Shareable::kYes) {
            // Shareable resources should still be in the cache.
            SkASSERT(fResourceMap.find(resource->key()));
        } else {
            SkDEBUGCODE(resource->fNonShareableInCache = true;)
            fResourceMap.insert(resource->key(), resource);
            if (resource->budgeted() == skgpu::Budgeted::kNo) {
                // TODO: Update budgeted tracking
                resource->makeBudgeted();
            }
        }
    }

    // If we weren't using multiple threads, it would be ok to assume a resource that isn't
    // purgeable must be in the non-purgeable array. However, since resources can be unreffed from
    // multiple threads, it is possible that a resource became purgeable while we are in the middle
    // of returning resources. For example, a resource could have 1 usage and 1 command buffer ref.
    // We then unref the usage which puts the resource in the return queue. Then the ResourceCache
    // thread locks the ReturnQueue as it returns the Resource. At this same time another thread
    // unrefs the command buffer usage but can't add the Resource to the ReturnQueue as it is
    // locked (but the command buffer ref has been reduced to zero). When we are processing the
    // Resource (from the kUsage ref) to return it to the cache it will look like it is purgeable
    // since all refs are zero. Thus we will move the Resource from the non-purgeable to the
    // purgeable queue. Then later when we return the command buffer ref, the Resource will have
    // already been moved to the purgeable queue and we don't need to do it again.
    if (!resource->isPurgeable() || this->inPurgeableQueue(resource)) {
        this->validate();
        return;
    }

    resource->setTimestamp(this->getNextTimestamp());

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    this->validate();
}

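// The non-purgeable array is kept in no particular order; each resource remembers its own index
// so it can later be removed in O(1) by swapping the array's tail into its slot (see
// removeFromNonpurgeableArray below).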
void ResourceCache::addToNonpurgeableArray(Resource* resource) {
    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->accessCacheIndex() = index;
}

void ResourceCache::removeFromNonpurgeableArray(Resource* resource) {
    int* index = resource->accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    Resource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    *index = -1;
}

void ResourceCache::removeFromPurgeableQueue(Resource* resource) {
    fPurgeableQueue.remove(resource);
    // SkTDPQueue will set the index back to -1 in debug builds, but we are using the index as a
    // flag for whether the Resource has been purged from the cache or not. So we need to make sure
    // it always gets set.
    *resource->accessCacheIndex() = -1;
}

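// A resource's cache index refers to its slot in either fPurgeableQueue or
// fNonpurgeableResources, so queue membership is determined by checking whether the indexed
// queue entry is this resource.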
bool ResourceCache::inPurgeableQueue(Resource* resource) const {
    SkASSERT(this->isInCache(resource));
    int index = *resource->accessCacheIndex();
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    return false;
}

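// Timestamps are handed out in increasing order and record relative LRU order. If the 32-bit
// counter wraps, every live resource is re-stamped with small sequential values that preserve
// the existing relative order before the next timestamp is returned.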
uint32_t ResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<Resource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                sortedPurgeableResources[currP++]->setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

////////////////////////////////////////////////////////////////////////////////

const GraphiteResourceKey& ResourceCache::MapTraits::GetKey(const Resource& r) {
    return r.key();
}

uint32_t ResourceCache::MapTraits::Hash(const GraphiteResourceKey& key) {
    return key.hash();
}

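// Orders resources by ascending timestamp, i.e. least recently used first.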
bool ResourceCache::CompareTimestamp(Resource* const& a, Resource* const& b) {
    return a->timestamp() < b->timestamp();
}

int* ResourceCache::AccessResourceIndex(Resource* const& res) {
    return res->accessCacheIndex();
}

#ifdef SK_DEBUG
void ResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        int fShareable;
        int fScratch;
        const ResourceMap* fResourceMap;

        Stats(const ResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fResourceMap = &cache->fResourceMap;
        }

        void update(Resource* resource) {
            const GraphiteResourceKey& key = resource->key();
            SkASSERT(key.isValid());

            // We should always have at least 1 cache ref.
            SkASSERT(resource->hasCacheRef());

            // All resources in the cache are owned. If we track wrapped resources in the cache
            // we'll need to update this check.
            SkASSERT(resource->ownership() == Ownership::kOwned);

            // We track scratch (non-shareable, no usage refs, has been returned to cache) and
            // shareable resources here as those should be the only things in the fResourceMap. A
            // non-shareable resource that does not meet the scratch criteria will not be given
            // back out for cache requests. After processing all the resources we assert that
            // fScratch + fShareable equals the count in the fResourceMap.
            if (resource->isUsableAsScratch()) {
                SkASSERT(key.shareable() == Shareable::kNo);
                SkASSERT(!resource->hasUsageRef());
                ++fScratch;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            } else if (key.shareable() == Shareable::kNo) {
                SkASSERT(!fResourceMap->has(resource, key));
            } else {
                SkASSERT(key.shareable() == Shareable::kYes);
                ++fShareable;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            }
        }
    };

    {
        int count = 0;
        fResourceMap.foreach([&](const Resource& resource) {
            SkASSERT(resource.isUsableAsScratch() || resource.key().shareable() == Shareable::kYes);
            SkASSERT(resource.budgeted() == skgpu::Budgeted::kYes);
            count++;
        });
        SkASSERT(count == fResourceMap.count());
    }

    // In the below checks we can assert that anything in the purgeable queue is purgeable because
    // we won't put a Resource into that queue unless all refs are zero. Thus there is no way for
    // that resource to be made non-purgeable without going through the cache (which will switch
    // queues back to non-purgeable).
    //
    // However, we can't say the same for things in the non-purgeable array. It is possible that
    // Resources have removed all their refs (thus technically become purgeable) but have not been
    // processed back into the cache yet. Thus we may not have moved resources to the purgeable
    // queue yet. It's also possible that a Resource hasn't been added to the ReturnQueue yet
    // (thread paused between unref and adding to ReturnQueue), so we can't even make asserts like
    // not purgeable or is in ReturnQueue.
    Stats stats(this);
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(*fNonpurgeableResources[i]->accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        SkASSERT(!this->inPurgeableQueue(fNonpurgeableResources[i]));
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT((stats.fScratch + stats.fShareable) == fResourceMap.count());
}

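// Debug-only sanity check that the index stored on the resource really does point at this
// resource in one of the cache's two containers.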
bool ResourceCache::isInCache(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if GRAPHITE_TEST_UTILS

int ResourceCache::numFindableResources() const {
    return fResourceMap.count();
}

#endif // GRAPHITE_TEST_UTILS

} // namespace skgpu::graphite