1 /*
2 * Copyright 2022 Google LLC
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/graphite/ResourceCache.h"
9
10 #include "include/private/base/SingleOwner.h"
11 #include "src/base/SkRandom.h"
12 #include "src/core/SkTMultiMap.h"
13 #include "src/core/SkTraceEvent.h"
14 #include "src/gpu/graphite/GraphiteResourceKey.h"
15 #include "src/gpu/graphite/ProxyCache.h"
16 #include "src/gpu/graphite/Resource.h"
17
18 #if defined(GPU_TEST_UTILS)
19 #include "src/gpu/graphite/Texture.h"
20 #endif
21
22 namespace skgpu::graphite {
23
24 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)
25
Make(SingleOwner * singleOwner,uint32_t recorderID,size_t maxBytes)26 sk_sp<ResourceCache> ResourceCache::Make(SingleOwner* singleOwner,
27 uint32_t recorderID,
28 size_t maxBytes) {
29 return sk_sp<ResourceCache>(new ResourceCache(singleOwner, recorderID, maxBytes));
30 }
31
// Constructs a cache with the given byte budget. A ProxyCache is created only when a valid
// recorder ID is supplied (i.e. the cache belongs to a Recorder).
ResourceCache::ResourceCache(SingleOwner* singleOwner, uint32_t recorderID, size_t maxBytes)
        : fMaxBytes(maxBytes)
        , fSingleOwner(singleOwner) {
    if (recorderID != SK_InvalidGenID) {
        fProxyCache = std::make_unique<ProxyCache>(recorderID);
    }
    // TODO: Maybe when things start using ResourceCache, then like Ganesh the compiler won't
    // complain about not using fSingleOwner in Release builds and we can delete this.
#ifndef SK_DEBUG
    (void)fSingleOwner;
#endif
}
44
ResourceCache::~ResourceCache() {
    // The ResourceCache must have been shutdown by the ResourceProvider before it is destroyed.
    SkASSERT(fIsShutdown);
}
49
// Tears the cache down: blocks new returns, drains the return queue, then drops the cache's
// ref on every tracked resource. Must run before the destructor (see ~ResourceCache).
void ResourceCache::shutdown() {
    ASSERT_SINGLE_OWNER

    SkASSERT(!fIsShutdown);

    {
        // Flip the flag under fReturnMutex so returnResource() calls racing with shutdown see
        // it and bail out instead of enqueueing.
        SkAutoMutexExclusive locked(fReturnMutex);
        fIsShutdown = true;
    }
    if (fProxyCache) {
        fProxyCache->purgeAll();
    }

    // Drain anything already in the return queue so every remaining resource is tracked in
    // either the nonpurgeable array or the purgeable queue handled below.
    this->processReturnedResources();

    // Release nonpurgeable resources from the tail so removeFromNonpurgeableArray's
    // swap-with-tail removal is trivial.
    while (!fNonpurgeableResources.empty()) {
        Resource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        this->removeFromNonpurgeableArray(back);
        back->unrefCache();
    }

    while (fPurgeableQueue.count()) {
        Resource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        this->removeFromPurgeableQueue(top);
        top->unrefCache();
    }

    TRACE_EVENT_INSTANT0("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD);
}
81
// Registers a newly-created, in-use resource with the cache. The resource must be owned,
// non-purgeable, keyed, and not already tracked. May purge other resources if adding this one
// puts the cache over budget.
void ResourceCache::insertResource(Resource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());
    SkASSERT(resource->key().isValid());
    // All resources in the cache are owned. If we track wrapped resources in the cache we'll need
    // to update this check.
    SkASSERT(resource->ownership() == Ownership::kOwned);

    // Make sure we have the most accurate memory size for "memoryless" resources.
    resource->updateGpuMemorySize();

    // The reason to call processReturnedResources here is to get an accurate accounting of our
    // memory usage as some resources can go from unbudgeted to budgeted when they return. So we
    // want to have them all returned before adding the budget for the new resource in case we need
    // to purge things. However, if the new resource has a memory size of 0, then we just skip
    // returning resources (which has overhead for each call) since the new resource won't be
    // affecting whether we're over or under budget.
    if (resource->gpuMemorySize() > 0) {
        this->processReturnedResources();
    }

    resource->registerWithCache(sk_ref_sp(this));
    resource->refCache();

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    this->setResourceTimestamp(resource, this->getNextTimestamp());
    resource->updateAccessTime();

    this->addToNonpurgeableArray(resource);

    SkDEBUGCODE(fCount++;)

    // Only shareable resources are findable in the map while in use; non-shareable ones are
    // inserted later when they return to the cache as scratch (see returnResourceToCache).
    if (resource->key().shareable() == Shareable::kYes) {
        fResourceMap.insert(resource->key(), resource);
    }

    if (resource->budgeted() == skgpu::Budgeted::kYes) {
        fBudgetedBytes += resource->gpuMemorySize();
    }

    this->purgeAsNeeded();
}
128
// Looks up a cached resource matching 'key'. On a hit the resource gets a usage ref and is
// made most-recently-used; a non-shareable (scratch) hit is also removed from the map so it
// cannot be handed out twice. Returns nullptr on a miss.
Resource* ResourceCache::findAndRefResource(const GraphiteResourceKey& key,
                                            skgpu::Budgeted budgeted) {
    ASSERT_SINGLE_OWNER

    SkASSERT(key.isValid());

    Resource* resource = fResourceMap.find(key);
    if (!resource) {
        // The main reason to call processReturnedResources in this call is to see if there are any
        // resources that we could match with the key. However, there is overhead into calling it.
        // So we only call it if we first failed to find a matching resource.
        if (this->processReturnedResources()) {
            resource = fResourceMap.find(key);
        }
    }
    if (resource) {
        // All resources we pull out of the cache for use should be budgeted
        SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
        if (key.shareable() == Shareable::kNo) {
            // If a resource is not shareable (i.e. scratch resource) then we remove it from the map
            // so that it isn't found again.
            fResourceMap.remove(key, resource);
            if (budgeted == skgpu::Budgeted::kNo) {
                resource->makeUnbudgeted();
                fBudgetedBytes -= resource->gpuMemorySize();
            }
            SkDEBUGCODE(resource->fNonShareableInCache = false;)
        } else {
            // Shareable resources should never be requested as non budgeted
            SkASSERT(budgeted == skgpu::Budgeted::kYes);
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }

    // processReturnedResources may have added resources back into our budget if they were being
    // using in an SkImage or SkSurface previously. However, instead of calling purgeAsNeeded in
    // processReturnedResources, we delay calling it until now so we don't end up purging a resource
    // we're looking for in this function.
    //
    // We could avoid calling this if we didn't return any resources from processReturnedResources.
    // However, when not overbudget purgeAsNeeded is very cheap. When overbudget there may be some
    // really niche usage patterns that could cause us to never actually return resources to the
    // cache, but still be overbudget due to shared resources. So to be safe we just always call it
    // here.
    this->purgeAsNeeded();

    return resource;
}
178
// Gives 'resource' an initial usage ref and stamps it most-recently-used, moving it from the
// purgeable queue to the nonpurgeable array first if needed.
void ResourceCache::refAndMakeResourceMRU(Resource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (this->inPurgeableQueue(resource)) {
        // It's about to become unpurgeable.
        this->removeFromPurgeableQueue(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->initialUsageRef();

    this->setResourceTimestamp(resource, this->getNextTimestamp());
    this->validate();
}
193
// Called when a resource loses its last usage or command-buffer ref (potentially from a
// non-cache thread; everything here runs under fReturnMutex). Enqueues the resource on the
// return queue for later processing on the cache thread. Returns false only if the cache has
// already been shut down, in which case the resource is not enqueued.
bool ResourceCache::returnResource(Resource* resource, LastRemovedRef removedRef) {
    // We should never be trying to return a LastRemovedRef of kCache.
    SkASSERT(removedRef != LastRemovedRef::kCache);
    SkAutoMutexExclusive locked(fReturnMutex);
    if (fIsShutdown) {
        return false;
    }

    SkASSERT(resource);

    // When a non-shareable resource's CB and Usage refs are both zero, give it a chance prepare
    // itself to be reused. On Dawn/WebGPU we use this to remap kXferCpuToGpu buffers asynchronously
    // so that they are already mapped before they come out of the cache again.
    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo &&
        resource->key().shareable() == Shareable::kNo &&
        removedRef == LastRemovedRef::kUsage) {
        resource->prepareForReturnToCache([resource] { resource->initialUsageRef(); });
        // Check if resource was re-ref'ed. In that case exit without adding to the queue.
        if (resource->hasUsageRef()) {
            return true;
        }
    }

    // We only allow one instance of a Resource to be in the return queue at a time. We do this so
    // that the ReturnQueue stays small and quick to process.
    //
    // Because we take CacheRefs to all Resources added to the ReturnQueue, we would be safe if we
    // decided to have multiple instances of a Resource. Even if an earlier returned instance of a
    // Resource triggers that Resource to get purged from the cache, the Resource itself wouldn't
    // get deleted until we drop all the CacheRefs in this ReturnQueue.
    if (*resource->accessReturnIndex() >= 0) {
        // If the resource is already in the return queue we promote the LastRemovedRef to be
        // kUsage if that is what is returned here.
        if (removedRef == LastRemovedRef::kUsage) {
            SkASSERT(*resource->accessReturnIndex() < (int)fReturnQueue.size());
            fReturnQueue[*resource->accessReturnIndex()].second = removedRef;
        }
        return true;
    }
#ifdef SK_DEBUG
    for (auto& nextResource : fReturnQueue) {
        SkASSERT(nextResource.first != resource);
    }
#endif

    // Enqueue, remember the slot on the resource for the duplicate check above, and hold a
    // cache ref so the resource stays alive while it sits in the queue.
    fReturnQueue.push_back(std::make_pair(resource, removedRef));
    *resource->accessReturnIndex() = fReturnQueue.size() - 1;
    resource->refCache();
    return true;
}
244
// Drains the return queue and re-integrates each returned resource into the cache's
// bookkeeping. Returns true if any resources were processed.
bool ResourceCache::processReturnedResources() {
    // We need to move the returned Resources off of the ReturnQueue before we start processing them
    // so that we can drop the fReturnMutex. When we process a Resource we may need to grab its
    // UnrefMutex. This could cause a deadlock if on another thread the Resource has the UnrefMutex
    // and is waiting on the ReturnMutex to be free.
    ReturnQueue tempQueue;
    {
        SkAutoMutexExclusive locked(fReturnMutex);
        // TODO: Instead of doing a copy of the vector, we may be able to improve the performance
        // here by storing some form of linked list, then just move the pointer the first element
        // and reset the ReturnQueue's top element to nullptr.
        tempQueue = fReturnQueue;
        fReturnQueue.clear();
        // Clear each resource's return-queue slot while still under the lock so the resource
        // can be re-enqueued by another thread once we release it.
        for (auto& nextResource : tempQueue) {
            auto [resource, ref] = nextResource;
            SkASSERT(*resource->accessReturnIndex() >= 0);
            *resource->accessReturnIndex() = -1;
        }
    }

    if (tempQueue.empty()) {
        return false;
    }

    // Trace after the lock has been released so we can simply record the tempQueue size.
    TRACE_EVENT1("skia.gpu.cache", TRACE_FUNC, "count", tempQueue.size());

    for (auto& nextResource : tempQueue) {
        auto [resource, ref] = nextResource;
        // We need this check here to handle the following scenario. A Resource is sitting in the
        // ReturnQueue (say from kUsage last ref) and the Resource still has a command buffer ref
        // out in the wild. When the ResourceCache calls processReturnedResources it locks the
        // ReturnMutex. Immediately after this, the command buffer ref is released on another
        // thread. The Resource cannot be added to the ReturnQueue since the lock is held. Back in
        // the ResourceCache (we'll drop the ReturnMutex) and when we try to return the Resource we
        // will see that it is purgeable. If we are overbudget it is possible that the Resource gets
        // purged from the ResourceCache at this time setting its cache index to -1. The unrefCache
        // call will actually block here on the Resource's UnrefMutex which is held from the command
        // buffer ref. Eventually the command bufer ref thread will get to run again and with the
        // ReturnMutex lock dropped it will get added to the ReturnQueue. At this point the first
        // unrefCache call will continue on the main ResourceCache thread. When we call
        // processReturnedResources the next time, we don't want this Resource added back into the
        // cache, thus we have the check here. The Resource will then get deleted when we call
        // unrefCache below to remove the cache ref added from the ReturnQueue.
        if (*resource->accessCacheIndex() != -1) {
            this->returnResourceToCache(resource, ref);
        }
        // Remove cache ref held by ReturnQueue
        resource->unrefCache();
    }
    return true;
}
297
// Re-integrates one returned resource: a non-shareable resource returning from its last usage
// ref becomes findable scratch (re-inserted into the map and re-budgeted), the budget is
// refreshed if the resource's reported size changed, and a now-purgeable resource is moved to
// the purgeable queue (or purged immediately if flagged DeleteASAP).
void ResourceCache::returnResourceToCache(Resource* resource, LastRemovedRef removedRef) {
    // A resource should not have been destroyed when placed into the return queue. Also before
    // purging any resources from the cache itself, it should always empty the queue first. When the
    // cache releases/abandons all of its resources, it first invalidates the return queue so no new
    // resources can be added. Thus we should not end up in a situation where a resource gets
    // destroyed after it was added to the return queue.
    SkASSERT(!resource->wasDestroyed());

    SkASSERT(this->isInCache(resource));
    if (removedRef == LastRemovedRef::kUsage) {
        if (resource->key().shareable() == Shareable::kYes) {
            // Shareable resources should still be in the cache
            SkASSERT(fResourceMap.find(resource->key()));
        } else {
            SkDEBUGCODE(resource->fNonShareableInCache = true;)
            resource->setLabel("Scratch");
            fResourceMap.insert(resource->key(), resource);
            if (resource->budgeted() == skgpu::Budgeted::kNo) {
                resource->makeBudgeted();
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    }

    // The resource's size may have changed while it was out; keep fBudgetedBytes in sync.
    if (resource->budgeted() == skgpu::Budgeted::kYes) {
        size_t oldSize = resource->gpuMemorySize();
        resource->updateGpuMemorySize();
        if (oldSize != resource->gpuMemorySize()) {
            fBudgetedBytes -= oldSize;
            fBudgetedBytes += resource->gpuMemorySize();
        }
    }

    // If we weren't using multiple threads, it is ok to assume a resource that isn't purgeable must
    // be in the non purgeable array. However, since resources can be unreffed from multiple
    // threads, it is possible that a resource became purgeable while we are in the middle of
    // returning resources. For example, a resource could have 1 usage and 1 command buffer ref. We
    // then unref the usage which puts the resource in the return queue. Then the ResourceCache
    // thread locks the ReturnQueue as it returns the Resource. At this same time another thread
    // unrefs the command buffer usage but can't add the Resource to the ReturnQueue as it is
    // locked (but the command buffer ref has been reduced to zero). When we are processing the
    // Resource (from the kUsage ref) to return it to the cache it will look like it is purgeable
    // since all refs are zero. Thus we will move the Resource from the non purgeable to purgeable
    // queue. Then later when we return the command buffer ref, the Resource will have already been
    // moved to purgeable queue and we don't need to do it again.
    if (!resource->isPurgeable() || this->inPurgeableQueue(resource)) {
        this->validate();
        return;
    }

    this->setResourceTimestamp(resource, this->getNextTimestamp());

    this->removeFromNonpurgeableArray(resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kYes) {
        this->purgeResource(resource);
    } else {
        resource->updateAccessTime();
        fPurgeableQueue.insert(resource);
        fPurgeableBytes += resource->gpuMemorySize();
    }
    this->validate();
}
361
addToNonpurgeableArray(Resource * resource)362 void ResourceCache::addToNonpurgeableArray(Resource* resource) {
363 int index = fNonpurgeableResources.size();
364 *fNonpurgeableResources.append() = resource;
365 *resource->accessCacheIndex() = index;
366 }
367
removeFromNonpurgeableArray(Resource * resource)368 void ResourceCache::removeFromNonpurgeableArray(Resource* resource) {
369 int* index = resource->accessCacheIndex();
370 // Fill the hole we will create in the array with the tail object, adjust its index, and
371 // then pop the array
372 Resource* tail = *(fNonpurgeableResources.end() - 1);
373 SkASSERT(fNonpurgeableResources[*index] == resource);
374 fNonpurgeableResources[*index] = tail;
375 *tail->accessCacheIndex() = *index;
376 fNonpurgeableResources.pop_back();
377 *index = -1;
378 }
379
removeFromPurgeableQueue(Resource * resource)380 void ResourceCache::removeFromPurgeableQueue(Resource* resource) {
381 fPurgeableQueue.remove(resource);
382 fPurgeableBytes -= resource->gpuMemorySize();
383 // SkTDPQueue will set the index back to -1 in debug builds, but we are using the index as a
384 // flag for whether the Resource has been purged from the cache or not. So we need to make sure
385 // it always gets set.
386 *resource->accessCacheIndex() = -1;
387 }
388
inPurgeableQueue(Resource * resource) const389 bool ResourceCache::inPurgeableQueue(Resource* resource) const {
390 SkASSERT(this->isInCache(resource));
391 int index = *resource->accessCacheIndex();
392 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
393 return true;
394 }
395 return false;
396 }
397
// Removes a purgeable resource from all cache bookkeeping (map, purgeable queue, budget) and
// drops the cache's ref on it.
void ResourceCache::purgeResource(Resource* resource) {
    SkASSERT(resource->isPurgeable());

    TRACE_EVENT_INSTANT1("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                         "size", resource->gpuMemorySize());

    fResourceMap.remove(resource->key(), resource);

    // DeleteASAP resources are purged straight out of the nonpurgeable array (see
    // returnResourceToCache) and never enter the purgeable queue.
    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo) {
        SkASSERT(this->inPurgeableQueue(resource));
        this->removeFromPurgeableQueue(resource);
    } else {
        SkASSERT(!this->isInCache(resource));
    }

    fBudgetedBytes -= resource->gpuMemorySize();
    resource->unrefCache();
}
416
// While over budget, purges resources from the purgeable queue in LRU (timestamp) order,
// after first letting the ProxyCache free uniquely-held entries. Stops early once only
// zero-sized resources (pinned at kMaxTimestamp) remain.
void ResourceCache::purgeAsNeeded() {
    ASSERT_SINGLE_OWNER

    if (this->overbudget() && fProxyCache) {
        fProxyCache->freeUniquelyHeld();

        // After the image cache frees resources we need to return those resources to the cache
        this->processReturnedResources();
    }
    while (this->overbudget() && fPurgeableQueue.count()) {
        Resource* resource = fPurgeableQueue.peek();
        SkASSERT(!resource->wasDestroyed());
        SkASSERT(fResourceMap.find(resource->key()));

        if (resource->timestamp() == kMaxTimestamp) {
            // If we hit a resource that is at kMaxTimestamp, then we've hit the part of the
            // purgeable queue with all zero sized resources. We don't want to actually remove those
            // so we just break here.
            SkASSERT(resource->gpuMemorySize() == 0);
            break;
        }

        this->purgeResource(resource);
    }

    this->validate();
}
444
// Purges all purgeable resources whose last access was before 'purgeTime'.
void ResourceCache::purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    ASSERT_SINGLE_OWNER
    this->purgeResources(&purgeTime);
}

// Purges all purgeable resources regardless of age.
void ResourceCache::purgeResources() {
    ASSERT_SINGLE_OWNER
    this->purgeResources(nullptr);
}
454
// Shared implementation for the two public purge entry points. Purges every purgeable
// resource last used before '*purgeTime', or all of them when purgeTime is null, after giving
// the ProxyCache a chance to drop its stale entries.
void ResourceCache::purgeResources(const StdSteadyClock::time_point* purgeTime) {
    TRACE_EVENT0("skia.gpu.cache", TRACE_FUNC);
    if (fProxyCache) {
        fProxyCache->purgeProxiesNotUsedSince(purgeTime);
    }
    this->processReturnedResources();

    // Early out if the very first item is too new to purge to avoid sorting the queue when
    // nothing will be deleted.
    if (fPurgeableQueue.count() &&
        purgeTime &&
        fPurgeableQueue.peek()->lastAccessTime() >= *purgeTime) {
        return;
    }

    // Sort the queue so resources are ordered by last access time for the scan below.
    fPurgeableQueue.sort();

    // Make a list of the scratch resources to delete
    SkTDArray<Resource*> resourcesToPurge;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        Resource* resource = fPurgeableQueue.at(i);

        const skgpu::StdSteadyClock::time_point resourceTime = resource->lastAccessTime();
        if (purgeTime && resourceTime >= *purgeTime) {
            // scratch or not, all later iterations will be too recently used to purge.
            break;
        }
        SkASSERT(resource->isPurgeable());
        *resourcesToPurge.append() = resource;
    }

    // Delete the scratch resources. This must be done as a separate pass
    // to avoid messing up the sorted order of the queue
    for (int i = 0; i < resourcesToPurge.size(); i++) {
        this->purgeResource(resourcesToPurge[i]);
    }

    // Since we called process returned resources at the start of this call, we could still end up
    // over budget even after purging resources based on purgeTime. So we call purgeAsNeeded at the
    // end here.
    this->purgeAsNeeded();
}
498
// Returns the next monotonically-increasing timestamp. When the counter reaches kMaxTimestamp
// all existing resource timestamps are compacted into a dense [0, count) range (preserving
// relative order) and the counter restarts, so relative recency is never lost to wraparound.
uint32_t ResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap. We wrap one value early when we reach kMaxTimestamp so that we
    // can continue to use kMaxTimestamp as a special case for zero sized resources.
    if (fTimestamp == kMaxTimestamp) {
        fTimestamp = 0;
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<Resource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            // Draining the queue via pop() yields the purgeable resources already in timestamp
            // order (the queue is ordered by CompareTimestamp).
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                    this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                this->setResourceTimestamp(sortedPurgeableResources[currP++], fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                this->setResourceTimestamp(fNonpurgeableResources[currNP++], fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
562
setResourceTimestamp(Resource * resource,uint32_t timestamp)563 void ResourceCache::setResourceTimestamp(Resource* resource, uint32_t timestamp) {
564 // We always set the timestamp for zero sized resources to be kMaxTimestamp
565 if (resource->gpuMemorySize() == 0) {
566 timestamp = kMaxTimestamp;
567 }
568 resource->setTimestamp(timestamp);
569 }
570
// Reports every cached resource (both nonpurgeable and purgeable) to 'traceMemoryDump'.
void ResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER

    // There is no need to process the return queue here. Resources in the queue are still in
    // either the purgeable queue or the nonpurgeable resources list (likely to be moved to the
    // purgeable queue). However, the Resource's own ref counts are used to report its purgeable
    // state to the memory dump, which is accurate without draining the return queue.

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}
586
// Updates the cache's byte budget and immediately enforces it.
void ResourceCache::setMaxBudget(size_t bytes) {
    fMaxBytes = bytes;
    // Drain the return queue first so the purge below works with accurate budget accounting.
    this->processReturnedResources();
    this->purgeAsNeeded();
}
592
593 ////////////////////////////////////////////////////////////////////////////////
594
// Trait hooks: how the resource map extracts and hashes keys, how the purgeable queue orders
// resources (oldest timestamp first), and where each resource stores its queue/array index.
const GraphiteResourceKey& ResourceCache::MapTraits::GetKey(const Resource& r) {
    return r.key();
}

uint32_t ResourceCache::MapTraits::Hash(const GraphiteResourceKey& key) {
    return key.hash();
}

bool ResourceCache::CompareTimestamp(Resource* const& a, Resource* const& b) {
    return a->timestamp() < b->timestamp();
}

int* ResourceCache::AccessResourceIndex(Resource* const& res) {
    return res->accessCacheIndex();
}
610
611 #ifdef SK_DEBUG
// Debug-only consistency check across the resource map, the nonpurgeable array, and the
// purgeable queue (counts, byte totals, index back-pointers, timestamp invariants). Randomly
// skipped more often as the resource count grows so debug builds stay usable.
void ResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Accumulates per-resource tallies that are compared against the cache's own counters at
    // the end of validate().
    struct Stats {
        int fShareable;
        int fScratch;
        size_t fBudgetedBytes;
        size_t fPurgeableBytes;
        const ResourceMap* fResourceMap;
        const PurgeableQueue* fPurgeableQueue;

        Stats(const ResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fResourceMap = &cache->fResourceMap;
            fPurgeableQueue = &cache->fPurgeableQueue;
        }

        void update(Resource* resource) {
            const GraphiteResourceKey& key = resource->key();
            SkASSERT(key.isValid());

            // We should always have at least 1 cache ref
            SkASSERT(resource->hasCacheRef());

            // All resources in the cache are owned. If we track wrapped resources in the cache
            // we'll need to update this check.
            SkASSERT(resource->ownership() == Ownership::kOwned);

            // We track scratch (non-shareable, no usage refs, has been returned to cache) and
            // shareable resources here as those should be the only things in the fResourceMap. A
            // non-shareable resources that does meet the scratch criteria will not be able to be
            // given back out from a cache requests. After processing all the resources we assert
            // that the fScratch + fShareable equals the count in the fResourceMap.
            if (resource->isUsableAsScratch()) {
                SkASSERT(key.shareable() == Shareable::kNo);
                SkASSERT(!resource->hasUsageRef());
                ++fScratch;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            } else if (key.shareable() == Shareable::kNo) {
                SkASSERT(!fResourceMap->has(resource, key));
            } else {
                SkASSERT(key.shareable() == Shareable::kYes);
                ++fShareable;
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == skgpu::Budgeted::kYes);
            }

            if (resource->budgeted() == skgpu::Budgeted::kYes) {
                fBudgetedBytes += resource->gpuMemorySize();
            }

            // Zero-sized resources are pinned at kMaxTimestamp (see setResourceTimestamp).
            if (resource->gpuMemorySize() == 0) {
                SkASSERT(resource->timestamp() == kMaxTimestamp);
            } else {
                SkASSERT(resource->timestamp() < kMaxTimestamp);
            }

            int index = *resource->accessCacheIndex();
            if (index < fPurgeableQueue->count() && fPurgeableQueue->at(index) == resource) {
                SkASSERT(resource->isPurgeable());
                fPurgeableBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fResourceMap.foreach([&](const Resource& resource) {
            SkASSERT(resource.isUsableAsScratch() || resource.key().shareable() == Shareable::kYes);
            SkASSERT(resource.budgeted() == skgpu::Budgeted::kYes);
            count++;
        });
        SkASSERT(count == fResourceMap.count());
    }

    // In the below checks we can assert that anything in the purgeable queue is purgeable because
    // we won't put a Resource into that queue unless all refs are zero. Thus there is no way for
    // that resource to be made non-purgeable without going through the cache (which will switch
    // queues back to non-purgeable).
    //
    // However, we can't say the same for things in the non-purgeable array. It is possible that
    // Resources have removed all their refs (thus technically become purgeable) but have not been
    // processed back into the cache yet. Thus we may not have moved resources to the purgeable
    // queue yet. Its also possible that Resource hasn't been added to the ReturnQueue yet (thread
    // paused between unref and adding to ReturnQueue) so we can't even make asserts like not
    // purgeable or is in ReturnQueue.
    Stats stats(this);
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(*fNonpurgeableResources[i]->accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        SkASSERT(!this->inPurgeableQueue(fNonpurgeableResources[i]));
        stats.update(fNonpurgeableResources[i]);
    }
    bool firstPurgeableIsSizeZero = false;
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (i == 0) {
            firstPurgeableIsSizeZero = (fPurgeableQueue.at(0)->gpuMemorySize() == 0);
        }
        if (firstPurgeableIsSizeZero) {
            // If the first purgeable item (i.e. least recently used) is sized zero, then all other
            // purgeable resources must also be sized zero since they should all have a timestamp of
            // kMaxTimestamp.
            SkASSERT(fPurgeableQueue.at(i)->gpuMemorySize() == 0);
        }
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT((stats.fScratch + stats.fShareable) == fResourceMap.count());
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fPurgeableBytes == fPurgeableBytes);
}
732
// Debug-only: true if the resource's stored cache index points at it in either the purgeable
// queue or the nonpurgeable array; -1 means "not in cache". Any other state is a bug.
bool ResourceCache::isInCache(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}
747
748 #endif // SK_DEBUG
749
750 #if defined(GPU_TEST_UTILS)
751
// Test-only: number of resources currently discoverable via the resource map.
int ResourceCache::numFindableResources() const {
    return fResourceMap.count();
}
755
topOfPurgeableQueue()756 Resource* ResourceCache::topOfPurgeableQueue() {
757 if (!fPurgeableQueue.count()) {
758 return nullptr;
759 }
760 return fPurgeableQueue.peek();
761 }
762
visitTextures(const std::function<void (const Texture *,bool purgeable)> & func) const763 void ResourceCache::visitTextures(
764 const std::function<void(const Texture*, bool purgeable)>& func) const {
765 for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
766 if (const Texture* tex = fNonpurgeableResources[i]->asTexture()) {
767 func(tex, /* purgeable= */ false);
768 }
769 }
770 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
771 if (const Texture* tex = fPurgeableQueue.at(i)->asTexture()) {
772 func(tex, /* purgeable= */ true);
773 }
774 }
775 }
776
777 #endif // defined(GPU_TEST_UTILS)
778
779 } // namespace skgpu::graphite
780