/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/ResourceCache.h"

#include "include/private/base/SingleOwner.h"
#include "src/base/SkNoDestructor.h"
#include "src/base/SkRandom.h"
#include "src/core/SkTMultiMap.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/graphite/GraphiteResourceKey.h"
#include "src/gpu/graphite/ProxyCache.h"
#include "src/gpu/graphite/Resource.h"

#if defined(GPU_TEST_UTILS)
#include "src/gpu/graphite/Texture.h"
#endif

namespace skgpu::graphite {

#define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)

namespace {

static constexpr uint32_t kMaxUseToken = 0xFFFFFFFF;
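// Note: kMaxUseToken doubles as the permanent use token for zero-sized resources (see
// setResourceUseToken() below), so they always sort as most recently used in the purgeable
// queue and purgeAsNeeded() can stop purging once it reaches them.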

// Singleton Resource subclass that provides a fixed address used to track the end of the return
// queue and when the resource cache has been shut down.
class Sentinel : public Resource {
public:
    static Resource* Get() {
        static SkNoDestructor<Sentinel> kSentinel{};
        return kSentinel.get();
    }

private:
    template <typename T>
    friend class ::SkNoDestructor;

    // We can pass in a null shared context here because the only instance that is ever created is
    // wrapped in SkNoDestructor, and we never actually use it as a Resource.
    Sentinel() : Resource(/*sharedContext=*/nullptr, Ownership::kOwned, /*gpuMemorySize=*/0) {}

    const char* getResourceType() const override { return "Sentinel"; }

    void freeGpuData() override {}
};
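
// The sentinel's fixed address serves two purposes in the lock-free return queue (effectively a
// Treiber-style stack with a pop-all drain): stored as fReturnQueue's head it marks the cache as
// shut down, and stored as the tail resource's next pointer it lets null vs. non-null next
// pointers distinguish "not in the queue" from "last entry in the queue".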

} // anonymous namespace

sk_sp<ResourceCache> ResourceCache::Make(SingleOwner* singleOwner,
                                         uint32_t recorderID,
                                         size_t maxBytes) {
    return sk_sp<ResourceCache>(new ResourceCache(singleOwner, recorderID, maxBytes));
}

ResourceCache::ResourceCache(SingleOwner* singleOwner, uint32_t recorderID, size_t maxBytes)
        : fMaxBytes(maxBytes)
        , fSingleOwner(singleOwner) {
    if (recorderID != SK_InvalidGenID) {
        fProxyCache = std::make_unique<ProxyCache>(recorderID);
    }
    // TODO: Maybe once more code uses the ResourceCache the compiler will, as in Ganesh, stop
    // complaining about fSingleOwner being unused in Release builds and we can delete this.
#if !defined(SK_DEBUG)
    (void)fSingleOwner;
#endif
}

ResourceCache::~ResourceCache() {
    // The ResourceCache must have been shut down by the ResourceProvider before it is destroyed.
    SkASSERT(fReturnQueue.load(std::memory_order_acquire) == Sentinel::Get());
}

void ResourceCache::shutdown() {
    ASSERT_SINGLE_OWNER

    // At this point no more changes will happen to fReturnQueue or the resources within that
    // linked list, but we do need to finish processing them for a graceful shutdown.
    this->processReturnedResources(Sentinel::Get());

    if (fProxyCache) {
        fProxyCache->purgeAll();
        // NOTE: any resources that would become purgeable or reusable from purging the proxy
        // cache are not added to the return queue and remain in the nonpurgeable array. Below,
        // their cache refs are removed, causing them to be deleted immediately.
    }

    while (!fNonpurgeableResources.empty()) {
        Resource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        this->removeFromNonpurgeableArray(back);
        back->unrefCache();
    }

    while (fPurgeableQueue.count()) {
        Resource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        this->removeFromPurgeableQueue(top);
        top->unrefCache();
    }

    TRACE_EVENT_INSTANT0("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD);
}

void ResourceCache::insertResource(Resource* resource,
                                   const GraphiteResourceKey& key,
                                   Budgeted budgeted,
                                   Shareable shareable) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(key.isValid());
    SkASSERT(shareable == Shareable::kNo || budgeted == Budgeted::kYes);

    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->isPurgeable());
    SkASSERT(!resource->key().isValid());
    // All resources in the cache are owned. If we track wrapped resources in the cache we'll need
    // to update this check.
    SkASSERT(resource->ownership() == Ownership::kOwned);

    // Make sure we have the most accurate memory size for "memoryless" resources.
    resource->updateGpuMemorySize();

    // The reason to call processReturnedResources here is to get an accurate accounting of our
    // memory usage, as some resources can go from unbudgeted to budgeted when they return. So we
    // want to have them all returned before adding the budget for the new resource in case we
    // need to purge things. However, if the new resource has a memory size of 0, then we just
    // skip returning resources (which has overhead for each call) since the new resource won't
    // affect whether we're over or under budget.
    if (resource->gpuMemorySize() > 0) {
        this->processReturnedResources();
    }

    resource->registerWithCache(sk_ref_sp(this), key, budgeted, shareable);

    // We must set the use token before adding to the array in case the token wraps and we wind
    // up iterating over all the resources that already have use tokens.
    this->setResourceUseToken(resource, this->getNextUseToken());
    resource->updateAccessTime();

    this->addToNonpurgeableArray(resource);

    SkDEBUGCODE(fCount++;)

    if (resource->shareable() != Shareable::kNo) {
        // Scratch and shareable resources are always available for reuse.
        this->addToResourceMap(resource);
    }

    if (resource->budgeted() == Budgeted::kYes) {
        fBudgetedBytes += resource->gpuMemorySize();
    }

    this->purgeAsNeeded();
}

Resource* ResourceCache::findAndRefResource(const GraphiteResourceKey& key,
                                            Budgeted budgeted,
                                            Shareable shareable,
                                            const ScratchResourceSet* unavailable) {
    ASSERT_SINGLE_OWNER

    SkASSERT(key.isValid());
    SkASSERT(shareable == Shareable::kNo || budgeted == Budgeted::kYes);
    SkASSERT(shareable != Shareable::kScratch || SkToBool(unavailable));

    auto shareablePredicate = [shareable, unavailable](Resource* r) {
        // If the resource is in fResourceMap then it's available, so a non-shareable state means
        // it really has no outstanding uses and can be converted to any other shareable state.
        // Otherwise, if it's available, it can only be reused with the same mode. Additionally,
        // kScratch resources cannot already be in the `unavailable` set passed in.
        return (r->shareable() == Shareable::kNo || r->shareable() == shareable) &&
               (shareable != Shareable::kScratch || !unavailable->contains(r));
    };

    Resource* resource = fResourceMap.find(key, shareablePredicate);
    if (!resource) {
        // The main reason to call processReturnedResources in this call is to see if there are
        // any resources that we could match with the key. However, there is overhead to calling
        // it, so we only call it if we first failed to find a matching resource.
        if (this->processReturnedResources()) {
            resource = fResourceMap.find(key, shareablePredicate);
        }
    }
    if (resource) {
        // All resources we pull out of the cache for use should be budgeted.
        SkASSERT(resource->budgeted() == Budgeted::kYes);
        SkASSERT(resource->key() == key);

        if (shareable == Shareable::kNo) {
            // If the returned resource is no longer shareable then we remove it from the map so
            // that it isn't found again.
            SkASSERT(resource->shareable() == Shareable::kNo);
            this->removeFromResourceMap(resource);
            if (budgeted == Budgeted::kNo) {
                resource->setBudgeted(Budgeted::kNo);
                fBudgetedBytes -= resource->gpuMemorySize();
            }
        } else {
            // Shareable and scratch resources should never be requested as non-budgeted.
            SkASSERT(budgeted == Budgeted::kYes);
            resource->setShareable(shareable);
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }

    // processReturnedResources may have added resources back into our budget if they were being
    // used in an SkImage or SkSurface previously. However, instead of calling purgeAsNeeded in
    // processReturnedResources, we delay calling it until now so we don't end up purging a
    // resource we're looking for in this function.
    //
    // We could avoid calling this if we didn't return any resources from
    // processReturnedResources. However, when not overbudget, purgeAsNeeded is very cheap. When
    // overbudget there may be some really niche usage patterns that could cause us to never
    // actually return resources to the cache, but still be overbudget due to shared resources.
    // So to be safe we just always call it here.
    this->purgeAsNeeded();

    return resource;
}

void ResourceCache::refAndMakeResourceMRU(Resource* resource) {
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (this->inPurgeableQueue(resource)) {
        // It's about to become unpurgeable.
        this->removeFromPurgeableQueue(resource);
        this->addToNonpurgeableArray(resource);
    }
    resource->initialUsageRef();

    this->setResourceUseToken(resource, this->getNextUseToken());
    this->validate();
}

void ResourceCache::forceProcessReturnedResources() {
    ASSERT_SINGLE_OWNER
    this->processReturnedResources();
}

bool ResourceCache::returnResource(Resource* resource) {
    SkASSERT(resource && resource->cache() == this);
    // We only allow one instance of a Resource to be in the return queue at a time, but it should
    // have already added a return queue ref.
    SkASSERT(!resource->inReturnQueue() && resource->hasReturnQueueRef());

    // Check once with a relaxed load to try and minimize the amount of wasted preparation work if
    // the cache has already been shut down.
    Resource* oldHeadPtr = fReturnQueue.load(std::memory_order_relaxed);
    if (oldHeadPtr == Sentinel::Get()) {
        return false;
    }

    // When a non-shareable resource's CB and usage refs are both zero, give it a chance to
    // prepare itself to be reused. On Dawn/WebGPU we use this to remap kXferCpuToGpu buffers
    // asynchronously so that they are already mapped before they come out of the cache again.
    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo &&
        resource->shareable() == Shareable::kNo) {
        // If we get here, we know the usage ref count is 0, so the only way for that to increase
        // again is if the Resource triggers the initial usage ref in the callback.
        SkDEBUGCODE(bool takeRefActuallyCalled = false;)
        bool takeRefCalled = resource->prepareForReturnToCache([&] {
            // This adds a usage ref AND removes the return queue ref. When returnResource()
            // returns true, the cache takes responsibility for releasing the return queue ref.
            // If we returned false from returnResource() when the resource invokes the takeRef
            // function, there's a gap between when the resource can be used on another thread
            // and when this thread removes the return queue ref. If the resource's new usage
            // ref is removed on the other thread before this thread were to remove the return
            // queue ref, it would end up skipping the return.
            //
            // By immediately unreffing the return queue ref before the resource can be exposed
            // to another thread, the resource will always be able to be re-returned when the
            // async work completes.
            //
            // Since prepareForReturnToCache() can only be used with resources that require
            // purgeability for reusability, and it is non-shareable, the only ref that can
            // change off thread is the resource's cache ref if the cache is simultaneously
            // shut down.
            //
            // Adding the usage ref first ensures the resource won't be disposed of early. When
            // the resource is prepared, it will come through returnResource() again but should
            // return false from prepareForReturnToCache() so that cache shutdown is detected.
            // This can add unnecessary preparation work for resources that won't ever be used,
            // but keeps the preparation logic relatively simple w/o needing a mutex.
            resource->initialUsageRef();
            resource->unrefReturnQueue();

            SkDEBUGCODE(takeRefActuallyCalled = true;)
        });

        SkASSERT(takeRefCalled == takeRefActuallyCalled);
        if (takeRefCalled) {
            // Return 'true' here because we've removed the return queue ref already and don't
            // want Resource to try and do that again. But since we added an initial ref, this
            // resource will be re-returned once the async prepare-for-return work has finished.
            return true;
        }
    }

    // Set the newly returned resource to be the head of the list, with its next pointer holding
    // the old head. If the head changed between loading it and the compare-exchange, we retry,
    // because it means there was a simultaneous return or the cache was shut down.
    do {
        oldHeadPtr = fReturnQueue.load(std::memory_order_acquire);
        if (oldHeadPtr == Sentinel::Get()) {
            // Once the cache has been shut down, it can never be re-opened and we don't want to
            // actually return this resource.
            resource->setNextInReturnQueue(nullptr);
            return false;
        } else {
            // If oldHeadPtr is null, this resource will be the tail of the return queue as it
            // grows, so set its next pointer to the sentinel so that nullity can be used to test
            // for being in the queue or not.
            resource->setNextInReturnQueue(oldHeadPtr ? oldHeadPtr : Sentinel::Get());
        }
    } while (!fReturnQueue.compare_exchange_weak(oldHeadPtr, resource,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed));
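
    // compare_exchange_weak may fail spuriously, but inside a retry loop that is harmless and can
    // be cheaper than compare_exchange_strong on LL/SC architectures. The release ordering on
    // success publishes this resource's next pointer before other threads can observe it as the
    // new head.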

    // Once we've got here, it means that `resource` has been atomically included in the return
    // queue. At this point, fReturnQueue's head value may or may not be `resource`, depending on
    // whether another thread has added another resource or processed the return queue, but in
    // either event, `resource` will be visible to that thread.
    return true;
}

bool ResourceCache::processReturnedResources(Resource* queueHead) {
    SkASSERT(queueHead == nullptr || queueHead == Sentinel::Get());
    // We need to move the returned Resources off the return queue before we start processing
    // them so that we can manipulate the resources without blocking subsequent returns on other
    // threads.
    Resource* oldQueue = fReturnQueue.exchange(queueHead, std::memory_order_acq_rel);
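    // The acquire half of the acq_rel exchange ensures that every resource pushed by returning
    // threads (and each resource's next pointer) is visible before we walk the list below.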

    // Can't un-shutdown the cache.
    SkASSERT(oldQueue != Sentinel::Get() || queueHead == Sentinel::Get());

    int returnCount = 0;
    // Stop if we encounter null or the sentinel address (either the list is empty, the cache has
    // been shut down, or we reached the tail returned resource that had next set to the
    // sentinel).
    while (oldQueue && oldQueue != Sentinel::Get()) {
        returnCount++;
        oldQueue = this->processReturnedResource(oldQueue);
    }

    TRACE_EVENT_INSTANT1("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                         "count", returnCount);
    return returnCount > 0;
}

Resource* ResourceCache::processReturnedResource(Resource* resource) {
    // A resource should not have been destroyed when placed into the return queue. Also, before
    // purging any resources from the cache itself, the cache always empties the queue first.
    // When the cache releases/abandons all of its resources, it first invalidates the return
    // queue so no new resources can be added. Thus we should not end up in a situation where a
    // resource gets destroyed after it was added to the return queue.
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));

    const auto [isReusable, isPurgeable, next] = resource->unrefReturnQueue();

    if (resource->shareable() != Shareable::kNo) {
        // Shareable resources should still be discoverable in the resource map.
        SkASSERT(fResourceMap.has(resource, resource->key()));
        SkASSERT(resource->isAvailableForReuse());

        // Reset the resource's sharing mode so that any shareable request can use it (e.g. now
        // that no more usages that required it to be scratch/shareable are held, the underlying
        // resource can be used in a non-shareable manner the next time it's fetched from the
        // cache). We can only change the shareable state when there are no outstanding usage
        // refs. Because this resource was shareable, it remained in fResourceMap and could have
        // a new usage ref before a prior return event was processed from the return queue.
        // However, when a shareable resource has no usage refs, this is the only thread that can
        // add an initial usage ref, so it is safe to adjust its shareable type.
        if (isReusable) {
            resource->setShareable(Shareable::kNo);
        }
    } else if (isReusable) {
        // Non-shareable resources are removed from the resource map when they are given out by
        // the cache. A resource is returned for either becoming reusable (needs to be added to
        // the resource map) or becoming purgeable (needs to be moved to the purgeable queue).
        // Becoming purgeable always implies becoming reusable, so as long as a previous return
        // hasn't put it into the resource map already, we do that now.
        if (!resource->isAvailableForReuse()) {
            SkASSERT(!fResourceMap.has(resource, resource->key()));
            this->addToResourceMap(resource);

            if (resource->budgeted() == Budgeted::kNo) {
                resource->setBudgeted(Budgeted::kYes);
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }

        SkASSERT(fResourceMap.has(resource, resource->key()));
        SkASSERT(resource->isAvailableForReuse());
        // Since the resource is non-shareable and available as scratch, there are no outstanding
        // refs that would make this assert not thread safe.
        SkASSERT(resource->isUsableAsScratch());
    } else {
        // This was a stale entry in the return queue, which can arise when a Resource becomes
        // reusable while it has outstanding command buffer refs. If the timing is right, the
        // command buffer ref can be removed so the resource is purgeable (and goes back into the
        // queue to be processed from non-purgeable to purgeable), but immediately after, the
        // cache thread can add a usage ref. By the next time the return queue is processed, the
        // resource is neither purgeable nor reusable.
        SkASSERT(!fResourceMap.has(resource, resource->key()));
        SkASSERT(!resource->isAvailableForReuse());
        // At an instantaneous moment, this resource should not be considered usable as scratch,
        // but we cannot assert !isUsableAsScratch() because the other threads that are holding
        // the extra refs described above can just as easily drop them between this assert and
        // the last call to unrefReturnQueue() that put us into this branch.
    }

    // Update the GPU budget now that the budget policy is up to date. Some GPU resources may
    // have their actual memory amount change over time, so update periodically.
    if (resource->budgeted() == Budgeted::kYes) {
        size_t oldSize = resource->gpuMemorySize();
        resource->updateGpuMemorySize();
        if (oldSize != resource->gpuMemorySize()) {
            fBudgetedBytes -= oldSize;
            fBudgetedBytes += resource->gpuMemorySize();
        }
    }

    this->setResourceUseToken(resource, this->getNextUseToken());

    // If the resource was not purgeable at the time the return queue ref was released, the
    // resource should still be in the non-purgeable array from when it was originally given
    // out. Another thread may have already removed the last refs keeping it non-purgeable by
    // the time this thread reaches this line, but that will only have re-added it to the return
    // queue. The cache stores the resource based on its purgeability at the time of releasing
    // the return queue ref. Any subsequent return due to becoming purgeable will complete
    // moving the resource from the non-purgeable array to the purgeable queue.
    SkASSERT(this->inNonpurgeableArray(resource));
    if (!isPurgeable) {
        this->validate();
        return next;
    }

    // Since the resource is purgeable, there are no external refs that can add new refs to make
    // it non-purgeable at this point. Only the current cache thread has that ability, so we can
    // safely continue moving the resource from non-purgeable to purgeable without worrying about
    // another state change.
    this->removeFromNonpurgeableArray(resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kYes) {
        this->purgeResource(resource);
    } else {
        // We don't purge this resource immediately even if we are overbudget. This allows later
        // purgeAsNeeded() calls to prioritize deleting less-recently-used Resources first.
        resource->updateAccessTime();
        fPurgeableQueue.insert(resource);
        fPurgeableBytes += resource->gpuMemorySize();
    }
    this->validate();

    return next;
}

void ResourceCache::addToResourceMap(Resource* resource) {
    SkASSERT(this->isInCache(resource));
    SkASSERT(!resource->isAvailableForReuse());
    SkASSERT(!fResourceMap.has(resource, resource->key()));
    fResourceMap.insert(resource->key(), resource);
    resource->setAvailableForReuse(true);
}

void ResourceCache::removeFromResourceMap(Resource* resource) {
    SkASSERT(this->isInCache(resource));
    SkASSERT(resource->isAvailableForReuse());
    SkASSERT(fResourceMap.has(resource, resource->key()));
    fResourceMap.remove(resource->key(), resource);
    resource->setAvailableForReuse(false);
}

void ResourceCache::addToNonpurgeableArray(Resource* resource) {
    SkASSERT(!this->inNonpurgeableArray(resource));

    int index = fNonpurgeableResources.size();
    *fNonpurgeableResources.append() = resource;
    *resource->accessCacheIndex() = index;
}

void ResourceCache::removeFromNonpurgeableArray(Resource* resource) {
    SkASSERT(this->inNonpurgeableArray(resource));

    int* index = resource->accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array.
    Resource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->accessCacheIndex() = *index;
    fNonpurgeableResources.pop_back();
    *index = -1;
}

void ResourceCache::removeFromPurgeableQueue(Resource* resource) {
    SkASSERT(this->inPurgeableQueue(resource));

    fPurgeableQueue.remove(resource);
    fPurgeableBytes -= resource->gpuMemorySize();
    // SkTDPQueue will set the index back to -1 in debug builds, but we are using the index as a
    // flag for whether the Resource has been purged from the cache or not. So we need to make
    // sure it always gets set.
    *resource->accessCacheIndex() = -1;
}
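
// Note: a Resource stores a single cache index that is interpreted against whichever container
// currently holds it (fPurgeableQueue or fNonpurgeableResources); -1 means it is in neither.
// This is why the membership checks below verify both the index bounds and the back-pointer.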

bool ResourceCache::inPurgeableQueue(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    return index >= 0 && index < fPurgeableQueue.count() &&
           fPurgeableQueue.at(index) == resource;
}

#if defined(SK_DEBUG)

bool ResourceCache::inNonpurgeableArray(const Resource* resource) const {
    int index = *resource->accessCacheIndex();
    return index >= 0 && index < fNonpurgeableResources.size() &&
           fNonpurgeableResources[index] == resource;
}

bool ResourceCache::isInCache(const Resource* resource) const {
    if (this->inPurgeableQueue(resource) || this->inNonpurgeableArray(resource)) {
        SkASSERT(resource->cache() == this);
        SkASSERT(resource->hasCacheRef());
        return true;
    }
    // The resource's index should have been set to -1 if the resource is not in the cache.
    SkASSERT(*resource->accessCacheIndex() == -1);
    // Don't assert that the resource has no cache ref, as the ResourceCache checks this before it
    // removes its cache ref in the event that that was the last ref keeping the resource alive.
    return false;
}

#endif // SK_DEBUG

void ResourceCache::purgeResource(Resource* resource) {
    SkASSERT(resource->isPurgeable());

    TRACE_EVENT_INSTANT1("skia.gpu.cache", TRACE_FUNC, TRACE_EVENT_SCOPE_THREAD,
                         "size", resource->gpuMemorySize());

    this->removeFromResourceMap(resource);

    if (resource->shouldDeleteASAP() == Resource::DeleteASAP::kNo) {
        SkASSERT(this->inPurgeableQueue(resource));
        this->removeFromPurgeableQueue(resource);
    }

    SkASSERT(!this->isInCache(resource));

    fBudgetedBytes -= resource->gpuMemorySize();
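    // Dropping the cache ref is what actually frees the resource: the resource is purgeable, so
    // the cache ref should be the last remaining ref, and releasing it triggers deletion of the
    // underlying GPU object.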
    resource->unrefCache();
}

void ResourceCache::purgeAsNeeded() {
    ASSERT_SINGLE_OWNER

    if (this->overbudget() && fProxyCache) {
        fProxyCache->freeUniquelyHeld();

        // After the proxy cache frees resources, we need to return those resources to the cache.
        this->processReturnedResources();
    }
    while (this->overbudget() && fPurgeableQueue.count()) {
        Resource* resource = fPurgeableQueue.peek();
        SkASSERT(!resource->wasDestroyed());
        SkASSERT(fResourceMap.has(resource, resource->key()));

        if (resource->lastUseToken() == kMaxUseToken) {
            // If we hit a resource that is at kMaxUseToken, then we've hit the part of the
            // purgeable queue with all zero-sized resources. We don't want to actually remove
            // those, so we just break here.
            SkASSERT(resource->gpuMemorySize() == 0);
            break;
        }

        this->purgeResource(resource);
    }

    this->validate();
}

void ResourceCache::purgeResourcesNotUsedSince(StdSteadyClock::time_point purgeTime) {
    ASSERT_SINGLE_OWNER
    this->purgeResources(&purgeTime);
}

void ResourceCache::purgeResources() {
    ASSERT_SINGLE_OWNER
    this->purgeResources(nullptr);
}

void ResourceCache::purgeResources(const StdSteadyClock::time_point* purgeTime) {
    TRACE_EVENT0("skia.gpu.cache", TRACE_FUNC);
    if (fProxyCache) {
        fProxyCache->purgeProxiesNotUsedSince(purgeTime);
    }
    this->processReturnedResources();

    // Early out if the very first item is too new to purge, to avoid sorting the queue when
    // nothing will be deleted.
    if (fPurgeableQueue.count() &&
        purgeTime &&
        fPurgeableQueue.peek()->lastAccessTime() >= *purgeTime) {
        return;
    }

    // Sort the queue.
    fPurgeableQueue.sort();

    // Make a list of the purgeable resources to delete.
    SkTDArray<Resource*> resourcesToPurge;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        Resource* resource = fPurgeableQueue.at(i);

        const skgpu::StdSteadyClock::time_point resourceTime = resource->lastAccessTime();
        if (purgeTime && resourceTime >= *purgeTime) {
            // Scratch or not, all later iterations will be too recently used to purge.
            break;
        }
        SkASSERT(resource->isPurgeable());
        *resourcesToPurge.append() = resource;
    }

    // Delete the resources. This must be done as a separate pass to avoid messing up the sorted
    // order of the queue.
    for (int i = 0; i < resourcesToPurge.size(); i++) {
        this->purgeResource(resourcesToPurge[i]);
    }

    // Since we called processReturnedResources at the start of this call, we could still end up
    // over budget even after purging resources based on purgeTime. So we call purgeAsNeeded at
    // the end here.
    this->purgeAsNeeded();
}

uint32_t ResourceCache::getNextUseToken() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a token after the wrap. We wrap one value early when we reach kMaxUseToken so that we
    // can continue to use kMaxUseToken as a special case for zero-sized resources.
    if (fUseToken == kMaxUseToken) {
        fUseToken = 0;
        int count = this->getResourceCount();
        if (count) {
            // Reset all the tokens. We sort the resources by their use token and then assign
            // sequential tokens beginning with 0. This is O(n*lg(n)) but it should be very rare.
            SkTDArray<Resource*> sortedPurgeableResources;
            sortedPurgeableResources.reserve(fPurgeableQueue.count());

            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareUseToken);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // use token and assign new tokens.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->lastUseToken();
                uint32_t tsNP = fNonpurgeableResources[currNP]->lastUseToken();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    this->setResourceUseToken(sortedPurgeableResources[currP++], fUseToken++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource
                    // post-sort.
                    *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                    this->setResourceUseToken(fNonpurgeableResources[currNP++], fUseToken++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                this->setResourceUseToken(sortedPurgeableResources[currP++], fUseToken++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->accessCacheIndex() = currNP;
                this->setResourceUseToken(fNonpurgeableResources[currNP++], fUseToken++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next use token we return.
            SkASSERT(fUseToken == SkToU32(count));
        }
    }
    return fUseToken++;
}
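
// Illustrative example of the wrap handling above (hypothetical token values): if the cache holds
// three non-zero-sized resources with use tokens {5, 9, 0xFFFFFFFE} when fUseToken reaches
// kMaxUseToken, they are re-assigned tokens {0, 1, 2} in the same relative order and fUseToken
// resumes at 3, so recency comparisons remain valid after the wrap.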

void ResourceCache::setResourceUseToken(Resource* resource, uint32_t token) {
    // We always set the use token for zero-sized resources to be kMaxUseToken.
    if (resource->gpuMemorySize() == 0) {
        token = kMaxUseToken;
    }
    resource->setLastUseToken(token);
}

void ResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    ASSERT_SINGLE_OWNER

    // There is no need to process the return queue here. Resources in the queue are still in
    // either the purgeable queue or the nonpurgeable resources list (likely to be moved to the
    // purgeable queue). However, the Resource's own ref counts are used to report its purgeable
    // state to the memory dump, which is accurate without draining the return queue.

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump, false);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump, true);
    }
}

void ResourceCache::setMaxBudget(size_t bytes) {
    fMaxBytes = bytes;
    this->processReturnedResources();
    this->purgeAsNeeded();
}
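
// For example (hypothetical caller), a client reacting to memory pressure could shrink the budget
// to zero to free every purgeable, non-zero-sized resource immediately:
//
//     resourceCache->setMaxBudget(0);
//
// Zero-sized resources are exempt because purgeAsNeeded() stops at the kMaxUseToken region of the
// purgeable queue.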

////////////////////////////////////////////////////////////////////////////////

#if defined(SK_DEBUG)
void ResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        int fShareable;
        int fScratch;
        size_t fBudgetedBytes;
        size_t fPurgeableBytes;
        const ResourceMap* fResourceMap;
        const PurgeableQueue* fPurgeableQueue;

        Stats(const ResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fResourceMap = &cache->fResourceMap;
            fPurgeableQueue = &cache->fPurgeableQueue;
        }

        void update(Resource* resource) {
            const GraphiteResourceKey& key = resource->key();
            SkASSERT(key.isValid());

            // All resources in the cache are owned. If we track wrapped resources in the cache
            // we'll need to update this check.
            SkASSERT(resource->ownership() == Ownership::kOwned);

            if (resource->shareable() == Shareable::kYes) {
                SkASSERT(resource->isAvailableForReuse());
                SkASSERT(fResourceMap->has(resource, key));
                SkASSERT(resource->budgeted() == Budgeted::kYes);
                ++fShareable;
            } else if (resource->isAvailableForReuse()) {
                // We track scratch resources (either non-shareable with no refs that are
                // returned, or explicitly scratch shared) separately from fully shareable.
                SkASSERT(resource->isUsableAsScratch());
                SkASSERT(fResourceMap->has(resource, key));
                ++fScratch;
            } else {
                // This should be a non-shareable resource that isn't available for reuse.
                SkASSERT(resource->shareable() == Shareable::kNo);
                SkASSERT(!fResourceMap->has(resource, key));
            }

            if (resource->budgeted() == Budgeted::kYes) {
                fBudgetedBytes += resource->gpuMemorySize();
            }

            if (resource->gpuMemorySize() == 0) {
                SkASSERT(resource->lastUseToken() == kMaxUseToken);
            } else {
                SkASSERT(resource->lastUseToken() < kMaxUseToken);
            }

            int index = *resource->accessCacheIndex();
            if (index < fPurgeableQueue->count() && fPurgeableQueue->at(index) == resource) {
                SkASSERT(resource->isPurgeable());
                fPurgeableBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fResourceMap.foreach([&](const Resource& resource) {
            SkASSERT(resource.isUsableAsScratch() || resource.shareable() == Shareable::kYes);
            SkASSERT(resource.budgeted() == Budgeted::kYes);
            SkASSERT(resource.isAvailableForReuse());
            SkASSERT(this->isInCache(&resource));
            count++;
        });
        SkASSERT(count == fResourceMap.count());
    }

    // In the below checks we can assert that anything in the purgeable queue is purgeable because
    // we won't put a Resource into that queue unless all refs are zero. Thus there is no way for
    // that resource to be made non-purgeable without going through the cache (which will switch
    // queues back to non-purgeable).
    //
    // However, we can't say the same for things in the non-purgeable array. It is possible that
    // Resources have removed all their refs (thus technically become purgeable) but have not been
    // processed back into the cache yet, so we may not have moved those resources to the
    // purgeable queue yet. It's also possible that a Resource hasn't been added to the return
    // queue yet (a thread paused between the unref and adding to the return queue), so we can't
    // even make asserts like "not purgeable" or "is in the return queue".
    Stats stats(this);
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(this->isInCache(fNonpurgeableResources[i]));
        SkASSERT(*fNonpurgeableResources[i]->accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        SkASSERT(!this->inPurgeableQueue(fNonpurgeableResources[i]));
        stats.update(fNonpurgeableResources[i]);
    }
    bool firstPurgeableIsSizeZero = false;
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (i == 0) {
            firstPurgeableIsSizeZero = (fPurgeableQueue.at(0)->gpuMemorySize() == 0);
        }
        if (firstPurgeableIsSizeZero) {
            // If the first purgeable item (i.e. least recently used) is sized zero, then all
            // other purgeable resources must also be sized zero since they should all have a use
            // token of kMaxUseToken.
            SkASSERT(fPurgeableQueue.at(i)->gpuMemorySize() == 0);
        }
        SkASSERT(this->isInCache(fPurgeableQueue.at(i)));
        SkASSERT(fPurgeableQueue.at(i)->isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
    }

    SkASSERT((stats.fScratch + stats.fShareable) == fResourceMap.count());
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fPurgeableBytes == fPurgeableBytes);
}

#endif // SK_DEBUG

#if defined(GPU_TEST_UTILS)

int ResourceCache::numFindableResources() const {
    return fResourceMap.count();
}

Resource* ResourceCache::topOfPurgeableQueue() {
    if (!fPurgeableQueue.count()) {
        return nullptr;
    }
    return fPurgeableQueue.peek();
}

void ResourceCache::visitTextures(
        const std::function<void(const Texture*, bool purgeable)>& func) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const Texture* tex = fNonpurgeableResources[i]->asTexture()) {
            func(tex, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const Texture* tex = fPurgeableQueue.at(i)->asTexture()) {
            func(tex, /* purgeable= */ true);
        }
    }
}

#endif // defined(GPU_TEST_UTILS)

} // namespace skgpu::graphite