1 /*
2  * Copyright 2014 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 #include "src/gpu/ganesh/GrResourceCache.h"
9 
10 #include "include/core/SkString.h"
11 #include "include/gpu/ganesh/GrDirectContext.h"
12 #include "include/gpu/ganesh/GrTypes.h"
13 #include "include/private/base/SingleOwner.h"
14 #include "include/private/base/SkNoncopyable.h"
15 #include "include/private/base/SkTo.h"
16 #include "src/base/SkMathPriv.h"
17 #include "src/base/SkRandom.h"
18 #include "src/base/SkTSort.h"
19 #include "src/core/SkStringUtils.h"
20 #include "src/core/SkMessageBus.h"
21 #include "src/core/SkTraceEvent.h"
22 #include "src/gpu/ganesh/GrDirectContextPriv.h"
23 #include "src/gpu/ganesh/GrGpuResourceCacheAccess.h"
24 #include "src/gpu/ganesh/GrProxyProvider.h"
25 #ifdef SKIA_OHOS
26 #include "src/gpu/ganesh/GrPerfMonitorReporter.h"
27 #endif
28 #include "src/gpu/ganesh/GrThreadSafeCache.h"
29 
30 #include <algorithm>
31 #include <chrono>
32 #include <cstring>
33 #include <vector>
34 #ifdef SKIA_DFX_FOR_OHOS
35 #include <sstream>
36 #include <iostream>
37 #endif
38 
39 using namespace skia_private;
40 
41 DECLARE_SKMESSAGEBUS_MESSAGE(skgpu::UniqueKeyInvalidatedMessage, uint32_t, true)
42 
43 DECLARE_SKMESSAGEBUS_MESSAGE(GrResourceCache::UnrefResourceMessage,
44                              GrDirectContext::DirectContextID,
45                              /*AllowCopyableMessage=*/false)
46 
47 #define ASSERT_SINGLE_OWNER SKGPU_ASSERT_SINGLE_OWNER(fSingleOwner)
48 
49 //////////////////////////////////////////////////////////////////////////////
50 
51 class GrResourceCache::AutoValidate : ::SkNoncopyable {
52 public:
53     AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
54     ~AutoValidate() { fCache->validate(); }
55 private:
56     GrResourceCache* fCache;
57 };
58 
59 //////////////////////////////////////////////////////////////////////////////
60 
61 GrResourceCache::GrResourceCache(skgpu::SingleOwner* singleOwner,
62                                  GrDirectContext::DirectContextID owningContextID,
63                                  uint32_t familyID)
64         : fInvalidUniqueKeyInbox(familyID)
65         , fUnrefResourceInbox(owningContextID)
66         , fOwningContextID(owningContextID)
67         , fContextUniqueID(familyID)
68         , fSingleOwner(singleOwner) {
69     SkASSERT(owningContextID.isValid());
70     SkASSERT(familyID != SK_InvalidUniqueID);
71 }
72 
73 GrResourceCache::~GrResourceCache() {
74     this->releaseAll();
75 }
76 
77 void GrResourceCache::setLimit(size_t bytes) {
78     fMaxBytes = bytes;
79     this->purgeAsNeeded();
80 }
81 
82 #ifdef SKIA_DFX_FOR_OHOS
83 static constexpr int MB = 1024 * 1024;
84 
85 #ifdef SKIA_OHOS
86 bool GrResourceCache::purgeUnlocakedResTraceEnabled_ =
87 #ifndef SKIA_OHOS_DEBUG
88     false;
89 #else
90     std::atoi((OHOS::system::GetParameter("sys.graphic.skia.cache.debug", "0").c_str())) == 1;
91 #endif
92 #endif
93 
94 void GrResourceCache::dumpInfo(SkString* out) {
95     if (out == nullptr) {
96         SkDebugf("OHOS GrResourceCache::dumpInfo outPtr is nullptr!");
97         return;
98     }
99     auto info = cacheInfo();
100     constexpr uint8_t STEP_INDEX = 1;
101     TArray<SkString> lines;
102     SkStrSplit(info.substr(STEP_INDEX, info.length() - STEP_INDEX).c_str(), ";", &lines);
103     for (int i = 0; i < lines.size(); ++i) {
104         out->appendf("    %s\n", lines[i].c_str());
105     }
106 }
107 
108 std::string GrResourceCache::cacheInfo()
109 {
110     auto fPurgeableQueueInfoStr = cacheInfoPurgeableQueue();
111     auto fNonpurgeableResourcesInfoStr = cacheInfoNoPurgeableQueue();
112 
113     std::ostringstream cacheInfoStream;
114     cacheInfoStream << "[fPurgeableQueueInfoStr.count : " << fPurgeableQueue.count()
115         << "; fNonpurgeableResources.count : " << fNonpurgeableResources.size()
116         << "; fBudgetedBytes : " << fBudgetedBytes
117         << "(" << static_cast<size_t>(fBudgetedBytes / MB)
118         << " MB) / " << fMaxBytes
119         << "(" << static_cast<size_t>(fMaxBytes / MB)
120         << " MB); fBudgetedCount : " << fBudgetedCount
121         << "; fBytes : " << fBytes
122         << "(" << static_cast<size_t>(fBytes / MB)
123         << " MB); fPurgeableBytes : " << fPurgeableBytes
124         << "(" << static_cast<size_t>(fPurgeableBytes / MB)
125         << " MB); fAllocImageBytes : " << fAllocImageBytes
126         << "(" << static_cast<size_t>(fAllocImageBytes / MB)
127         << " MB); fAllocBufferBytes : " << fAllocBufferBytes
128         << "(" << static_cast<size_t>(fAllocBufferBytes / MB)
129         << " MB); fTimestamp : " << fTimestamp
130         << "; " << fPurgeableQueueInfoStr << "; " << fNonpurgeableResourcesInfoStr;
131     return cacheInfoStream.str();
132 }
133 
134 #ifdef SKIA_OHOS
135 void GrResourceCache::traceBeforePurgeUnlockRes(const std::string& method, SimpleCacheInfo& simpleCacheInfo)
136 {
137     if (purgeUnlocakedResTraceEnabled_) {
138         StartTrace(HITRACE_TAG_GRAPHIC_AGP, method + " begin cacheInfo = " + cacheInfo());
139     } else {
140         simpleCacheInfo.fPurgeableQueueCount = fPurgeableQueue.count();
141         simpleCacheInfo.fNonpurgeableResourcesCount = fNonpurgeableResources.size();
142         simpleCacheInfo.fPurgeableBytes = fPurgeableBytes;
143         simpleCacheInfo.fBudgetedCount = fBudgetedCount;
144         simpleCacheInfo.fBudgetedBytes = fBudgetedBytes;
145         simpleCacheInfo.fAllocImageBytes = fAllocImageBytes;
146         simpleCacheInfo.fAllocBufferBytes = fAllocBufferBytes;
147     }
148 }
149 
150 void GrResourceCache::traceAfterPurgeUnlockRes(const std::string& method, const SimpleCacheInfo& simpleCacheInfo)
151 {
152 #ifdef SKIA_OHOS_FOR_OHOS_TRACE
153     if (purgeUnlocakedResTraceEnabled_) {
154         HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s", method.c_str(), cacheInfo().c_str());
155         FinishTrace(HITRACE_TAG_GRAPHIC_AGP);
156     } else {
157         HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s",
158             method.c_str(), cacheInfoComparison(simpleCacheInfo).c_str());
159     }
160 #endif
161 }
162 
163 std::string GrResourceCache::cacheInfoComparison(const SimpleCacheInfo& simpleCacheInfo)
164 {
165     std::ostringstream cacheInfoComparison;
166     cacheInfoComparison << "PurgeableCount : " << simpleCacheInfo.fPurgeableQueueCount
167         << " / " << fPurgeableQueue.count()
168         << "; NonpurgeableCount : " << simpleCacheInfo.fNonpurgeableResourcesCount
169         << " / " << fNonpurgeableResources.size()
170         << "; PurgeableBytes : " << simpleCacheInfo.fPurgeableBytes << " / " << fPurgeableBytes
171         << "; BudgetedCount : " << simpleCacheInfo.fBudgetedCount << " / " << fBudgetedCount
172         << "; BudgetedBytes : " << simpleCacheInfo.fBudgetedBytes << " / " << fBudgetedBytes
173         << "; AllocImageBytes : " << simpleCacheInfo.fAllocImageBytes << " / " << fAllocImageBytes
174         << "; AllocBufferBytes : " << simpleCacheInfo.fAllocBufferBytes << " / " << fAllocBufferBytes;
175     return cacheInfoComparison.str();
176 }
177 #endif // SKIA_OHOS
178 
179 std::string GrResourceCache::cacheInfoPurgeableQueue()
180 {
181     std::map<uint64_t, size_t> purgSizeInfoWid;
182     std::map<uint64_t, int> purgCountInfoWid;
183     std::map<uint64_t, std::string> purgNameInfoWid;
184     std::map<uint64_t, int> purgPidInfoWid;
185 
186     std::map<uint32_t, size_t> purgSizeInfoPid;
187     std::map<uint32_t, int> purgCountInfoPid;
188     std::map<uint32_t, std::string> purgNameInfoPid;
189 
190     std::map<uint32_t, size_t> purgSizeInfoFid;
191     std::map<uint32_t, int> purgCountInfoFid;
192     std::map<uint32_t, std::string> purgNameInfoFid;
193 
194     int purgCountUnknown = 0;
195     size_t purgSizeUnknown = 0;
196 
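    // Bucket purgeable resources by node id (fWid), falling back to type id (fFid) plus pid;
    // resources with no tag at all are totalled under "unknown".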
197     for (int i = 0; i < fPurgeableQueue.count(); i++) {
198         auto resource = fPurgeableQueue.at(i);
199         auto resourceTag = resource->getResourceTag();
200         if (resourceTag.fWid != 0) {
201             updatePurgeableWidMap(resource, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
202         } else if (resourceTag.fFid != 0) {
203             updatePurgeableFidMap(resource, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
204             if (resourceTag.fPid != 0) {
205                 updatePurgeablePidMap(resource, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
206             }
207         } else {
208             purgCountUnknown++;
209             purgSizeUnknown += resource->gpuMemorySize();
210         }
211     }
212 
213     std::string infoStr;
214     if (purgSizeInfoWid.size() > 0) {
215         infoStr += ";PurgeableInfo_Node:[";
216         updatePurgeableWidInfo(infoStr, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
217     }
218     if (purgSizeInfoPid.size() > 0) {
219         infoStr += ";PurgeableInfo_Pid:[";
220         updatePurgeablePidInfo(infoStr, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
221     }
222     if (purgSizeInfoFid.size() > 0) {
223         infoStr += ";PurgeableInfo_Fid:[";
224         updatePurgeableFidInfo(infoStr, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
225     }
226     updatePurgeableUnknownInfo(infoStr, ";PurgeableInfo_Unknown:", purgCountUnknown, purgSizeUnknown);
227     return infoStr;
228 }
229 
230 std::string GrResourceCache::cacheInfoNoPurgeableQueue()
231 {
232     std::map<uint64_t, size_t> noPurgSizeInfoWid;
233     std::map<uint64_t, int> noPurgCountInfoWid;
234     std::map<uint64_t, std::string> noPurgNameInfoWid;
235     std::map<uint64_t, int> noPurgPidInfoWid;
236 
237     std::map<uint32_t, size_t> noPurgSizeInfoPid;
238     std::map<uint32_t, int> noPurgCountInfoPid;
239     std::map<uint32_t, std::string> noPurgNameInfoPid;
240 
241     std::map<uint32_t, size_t> noPurgSizeInfoFid;
242     std::map<uint32_t, int> noPurgCountInfoFid;
243     std::map<uint32_t, std::string> noPurgNameInfoFid;
244 
245     int noPurgCountUnknown = 0;
246     size_t noPurgSizeUnknown = 0;
247 
248     for (int i = 0; i < fNonpurgeableResources.size(); i++) {
249         auto resource = fNonpurgeableResources[i];
250         if (resource == nullptr) {
251             continue;
252         }
253         auto resourceTag = resource->getResourceTag();
254         if (resourceTag.fWid != 0) {
255             updatePurgeableWidMap(resource, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
256         } else if (resourceTag.fFid != 0) {
257             updatePurgeableFidMap(resource, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
258             if (resourceTag.fPid != 0) {
259                 updatePurgeablePidMap(resource, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
260             }
261         } else {
262             noPurgCountUnknown++;
263             noPurgSizeUnknown += resource->gpuMemorySize();
264         }
265     }
266 
267     std::string infoStr;
268     if (noPurgSizeInfoWid.size() > 0) {
269         infoStr += ";NonPurgeableInfo_Node:[";
270         updatePurgeableWidInfo(infoStr, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
271     }
272     if (noPurgSizeInfoPid.size() > 0) {
273         infoStr += ";NonPurgeableInfo_Pid:[";
274         updatePurgeablePidInfo(infoStr, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
275     }
276     if (noPurgSizeInfoFid.size() > 0) {
277         infoStr += ";NonPurgeableInfo_Fid:[";
278         updatePurgeableFidInfo(infoStr, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
279     }
280     updatePurgeableUnknownInfo(infoStr, ";NonPurgeableInfo_Unknown:", noPurgCountUnknown, noPurgSizeUnknown);
281     return infoStr;
282 }
283 
284 void GrResourceCache::updatePurgeableWidMap(GrGpuResource* resource,
285                                             std::map<uint64_t, std::string>& nameInfoWid,
286                                             std::map<uint64_t, size_t>& sizeInfoWid,
287                                             std::map<uint64_t, int>& pidInfoWid,
288                                             std::map<uint64_t, int>& countInfoWid)
289 {
290     auto resourceTag = resource->getResourceTag();
291     auto it = sizeInfoWid.find(resourceTag.fWid);
292     if (it != sizeInfoWid.end()) {
293         sizeInfoWid[resourceTag.fWid] = it->second + resource->gpuMemorySize();
294         countInfoWid[resourceTag.fWid]++;
295     } else {
296         sizeInfoWid[resourceTag.fWid] = resource->gpuMemorySize();
297         nameInfoWid[resourceTag.fWid] = resourceTag.fName;
298         pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
299         countInfoWid[resourceTag.fWid] = 1;
300     }
301 }
302 
303 void GrResourceCache::updatePurgeablePidMap(GrGpuResource* resource,
304                                             std::map<uint32_t, std::string>& nameInfoPid,
305                                             std::map<uint32_t, size_t>& sizeInfoPid,
306                                             std::map<uint32_t, int>& countInfoPid)
307 {
308     auto resourceTag = resource->getResourceTag();
309     auto it = sizeInfoPid.find(resourceTag.fPid);
310     if (it != sizeInfoPid.end()) {
311         sizeInfoPid[resourceTag.fPid] = it->second + resource->gpuMemorySize();
312         countInfoPid[resourceTag.fPid]++;
313     } else {
314         sizeInfoPid[resourceTag.fPid] = resource->gpuMemorySize();
315         nameInfoPid[resourceTag.fPid] = resourceTag.fName;
316         countInfoPid[resourceTag.fPid] = 1;
317     }
318 }
319 
320 void GrResourceCache::updatePurgeableFidMap(GrGpuResource* resource,
321                                             std::map<uint32_t, std::string>& nameInfoFid,
322                                             std::map<uint32_t, size_t>& sizeInfoFid,
323                                             std::map<uint32_t, int>& countInfoFid)
324 {
325     auto resourceTag = resource->getResourceTag();
326     auto it = sizeInfoFid.find(resourceTag.fFid);
327     if (it != sizeInfoFid.end()) {
328         sizeInfoFid[resourceTag.fFid] = it->second + resource->gpuMemorySize();
329         countInfoFid[resourceTag.fFid]++;
330     } else {
331         sizeInfoFid[resourceTag.fFid] = resource->gpuMemorySize();
332         nameInfoFid[resourceTag.fFid] = resourceTag.fName;
333         countInfoFid[resourceTag.fFid] = 1;
334     }
335 }
336 
337 void GrResourceCache::updatePurgeableWidInfo(std::string& infoStr,
338                                              std::map<uint64_t, std::string>& nameInfoWid,
339                                              std::map<uint64_t, size_t>& sizeInfoWid,
340                                              std::map<uint64_t, int>& pidInfoWid,
341                                              std::map<uint64_t, int>& countInfoWid)
342 {
343     for (auto it = sizeInfoWid.begin(); it != sizeInfoWid.end(); it++) {
344         infoStr += "[" + nameInfoWid[it->first] +
345             ",pid=" + std::to_string(pidInfoWid[it->first]) +
346             ",NodeId=" + std::to_string(it->first) +
347             ",count=" + std::to_string(countInfoWid[it->first]) +
348             ",size=" + std::to_string(it->second) +
349             "(" + std::to_string(it->second / MB) + " MB)],";
350     }
351     infoStr += ']';
352 }
353 
354 void GrResourceCache::updatePurgeablePidInfo(std::string& infoStr,
355                                              std::map<uint32_t, std::string>& nameInfoPid,
356                                              std::map<uint32_t, size_t>& sizeInfoPid,
357                                              std::map<uint32_t, int>& countInfoPid)
358 {
359     for (auto it = sizeInfoPid.begin(); it != sizeInfoPid.end(); it++) {
360         infoStr += "[" + nameInfoPid[it->first] +
361             ",pid=" + std::to_string(it->first) +
362             ",count=" + std::to_string(countInfoPid[it->first]) +
363             ",size=" + std::to_string(it->second) +
364             "(" + std::to_string(it->second / MB) + " MB)],";
365     }
366     infoStr += ']';
367 }
368 
369 void GrResourceCache::updatePurgeableFidInfo(std::string& infoStr,
370                                              std::map<uint32_t, std::string>& nameInfoFid,
371                                              std::map<uint32_t, size_t>& sizeInfoFid,
372                                              std::map<uint32_t, int>& countInfoFid)
373 {
374     for (auto it = sizeInfoFid.begin(); it != sizeInfoFid.end(); it++) {
375         infoStr += "[" + nameInfoFid[it->first] +
376             ",typeid=" + std::to_string(it->first) +
377             ",count=" + std::to_string(countInfoFid[it->first]) +
378             ",size=" + std::to_string(it->second) +
379             "(" + std::to_string(it->second / MB) + " MB)],";
380     }
381     infoStr += ']';
382 }
383 
384 void GrResourceCache::updatePurgeableUnknownInfo(
385     std::string& infoStr, const std::string& unknownPrefix, const int countUnknown, const size_t sizeUnknown)
386 {
387     if (countUnknown > 0) {
388         infoStr += unknownPrefix +
389             "[count=" + std::to_string(countUnknown) +
390             ",size=" + std::to_string(sizeUnknown) +
391             "(" + std::to_string(sizeUnknown / MB) + "MB)]";
392     }
393 }
394 #endif
395 
396 void GrResourceCache::insertResource(GrGpuResource* resource)
397 {
398     ASSERT_SINGLE_OWNER
399     SkASSERT(resource);
400     SkASSERT(!this->isInCache(resource));
401     SkASSERT(!resource->wasDestroyed());
402     SkASSERT(!resource->resourcePriv().isPurgeable());
403 
404     // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
405     // up iterating over all the resources that already have timestamps.
406     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
407 
408     this->addToNonpurgeableArray(resource);
409 
410     size_t size = resource->gpuMemorySize();
411     SkDEBUGCODE(++fCount;)
412     fBytes += size;
413 
414     // OH ISSUE: memory count
415     auto pid = resource->getResourceTag().fPid;
416     if (pid && resource->isRealAlloc()) {
417         auto& pidSize = fBytesOfPid[pid];
418         pidSize += size;
419         fUpdatedBytesOfPid[pid] = pidSize;
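        // Report a pid at most once while it stays flagged: when its tracked bytes reach the
        // configured limit, invoke the overflow callback and remember the pid in fExitedPid_.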
420         if (pidSize >= fMemoryControl_ && fExitedPid_.find(pid) == fExitedPid_.end() && fMemoryOverflowCallback_) {
421             fMemoryOverflowCallback_(pid, pidSize, true);
422             fExitedPid_.insert(pid);
423             SkDebugf("OHOS resource overflow! pid[%{public}d], size[%{public}zu]", pid, pidSize);
424 #ifdef SKIA_OHOS_FOR_OHOS_TRACE
425             HITRACE_OHOS_NAME_FMT_ALWAYS("OHOS gpu resource overflow: pid(%u), size:(%u)", pid, pidSize);
426 #endif
427         }
428     }
429 
430 #if GR_CACHE_STATS
431     fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
432     fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
433 #endif
434     if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
435         ++fBudgetedCount;
436         fBudgetedBytes += size;
437         TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
438                        fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
439 #if GR_CACHE_STATS
440         fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
441         fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
442 #endif
443     }
444     SkASSERT(!resource->cacheAccess().isUsableAsScratch());
445 #ifdef SKIA_OHOS_FOR_OHOS_TRACE
446     if (fBudgetedBytes >= fMaxBytes) {
447         HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "cache over fBudgetedBytes:(%u),fMaxBytes:(%u)",
448             fBudgetedBytes, fMaxBytes);
449 #ifdef SKIA_DFX_FOR_OHOS
450         SimpleCacheInfo simpleCacheInfo;
451         traceBeforePurgeUnlockRes("insertResource", simpleCacheInfo);
452 #endif
453         this->purgeAsNeeded();
454 #ifdef SKIA_DFX_FOR_OHOS
455         traceAfterPurgeUnlockRes("insertResource", simpleCacheInfo);
456 #endif
457     } else {
458         this->purgeAsNeeded();
459     }
460 #else
461     this->purgeAsNeeded();
462 #endif
463 }
464 
465 void GrResourceCache::removeResource(GrGpuResource* resource) {
466     ASSERT_SINGLE_OWNER
467     this->validate();
468     SkASSERT(this->isInCache(resource));
469 
470     size_t size = resource->gpuMemorySize();
471     if (resource->resourcePriv().isPurgeable()) {
472         fPurgeableQueue.remove(resource);
473         fPurgeableBytes -= size;
474     } else {
475         this->removeFromNonpurgeableArray(resource);
476     }
477 
478     SkDEBUGCODE(--fCount;)
479     fBytes -= size;
480 
481     // OH ISSUE: memory count
482     auto pid = resource->getResourceTag().fPid;
483     if (pid && resource->isRealAlloc()) {
484         auto& pidSize = fBytesOfPid[pid];
485         pidSize -= size;
486         fUpdatedBytesOfPid[pid] = pidSize;
487         if (pidSize == 0) {
488             fBytesOfPid.erase(pid);
489         }
490     }
491 
492     if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
493         --fBudgetedCount;
494         fBudgetedBytes -= size;
495         TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
496                        fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
497     }
498 
499     if (resource->cacheAccess().isUsableAsScratch()) {
500         fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
501     }
502     if (resource->getUniqueKey().isValid()) {
503         fUniqueHash.remove(resource->getUniqueKey());
504     }
505     this->validate();
506 }
507 
508 void GrResourceCache::abandonAll() {
509     AutoValidate av(this);
510 
511     while (!fNonpurgeableResources.empty()) {
512         GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
513         SkASSERT(!back->wasDestroyed());
514         back->cacheAccess().abandon();
515     }
516 
517     while (fPurgeableQueue.count()) {
518         GrGpuResource* top = fPurgeableQueue.peek();
519         SkASSERT(!top->wasDestroyed());
520         top->cacheAccess().abandon();
521     }
522 
523     fThreadSafeCache->dropAllRefs();
524 
525     SkASSERT(!fScratchMap.count());
526     SkASSERT(!fUniqueHash.count());
527     SkASSERT(!fCount);
528     SkASSERT(!this->getResourceCount());
529     SkASSERT(!fBytes);
530     SkASSERT(!fBudgetedCount);
531     SkASSERT(!fBudgetedBytes);
532     SkASSERT(!fPurgeableBytes);
533 }
534 
535 void GrResourceCache::releaseAll() {
536     AutoValidate av(this);
537 
538     fThreadSafeCache->dropAllRefs();
539 
540     this->processFreedGpuResources();
541 
542     SkASSERT(fProxyProvider); // better have called setProxyProvider
543     SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too
544 
545     // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
546     // they also have a raw pointer back to this class (which is presumably going away)!
547     fProxyProvider->removeAllUniqueKeys();
548 
549     while (!fNonpurgeableResources.empty()) {
550         GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
551         SkASSERT(!back->wasDestroyed());
552         back->cacheAccess().release();
553     }
554 
555     while (fPurgeableQueue.count()) {
556         GrGpuResource* top = fPurgeableQueue.peek();
557         SkASSERT(!top->wasDestroyed());
558         top->cacheAccess().release();
559     }
560 
561     SkASSERT(!fScratchMap.count());
562     SkASSERT(!fUniqueHash.count());
563     SkASSERT(!fCount);
564     SkASSERT(!this->getResourceCount());
565     SkASSERT(!fBytes);
566     SkASSERT(!fBudgetedCount);
567     SkASSERT(!fBudgetedBytes);
568     SkASSERT(!fPurgeableBytes);
569 }
570 
571 void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
572     AutoValidate av(this);
573     this->processFreedGpuResources();
574     SkASSERT(fProxyProvider); // better have called setProxyProvider
575     std::vector<GrGpuResource*> recycleVector;
576     for (int i = 0; i < fNonpurgeableResources.size(); i++) {
577         GrGpuResource* resource = fNonpurgeableResources[i];
578         if (tag.filter(resource->getResourceTag())) {
579             recycleVector.emplace_back(resource);
580             if (resource->getUniqueKey().isValid()) {
581                 fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
582                                                         GrProxyProvider::InvalidateGPUResource::kNo);
583             }
584         }
585     }
586 
587     for (int i = 0; i < fPurgeableQueue.count(); i++) {
588         GrGpuResource* resource = fPurgeableQueue.at(i);
589         if (tag.filter(resource->getResourceTag())) {
590             recycleVector.emplace_back(resource);
591             if (resource->getUniqueKey().isValid()) {
592                 fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
593                                                         GrProxyProvider::InvalidateGPUResource::kNo);
594             }
595         }
596     }
597 
598     for (auto resource : recycleVector) {
599         SkASSERT(!resource->wasDestroyed());
600         resource->cacheAccess().release();
601     }
602 }
603 
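// A valid tag is pushed onto the per-context tag stack; passing an invalid tag pops the current one.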
604 void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
605     if (tag.isGrTagValid()) {
606         grResourceTagCacheStack.push(tag);
607         return;
608     }
609     if (!grResourceTagCacheStack.empty()) {
610         grResourceTagCacheStack.pop();
611     }
612 }
613 
614 void GrResourceCache::popGrResourceTag()
615 {
616     if (!grResourceTagCacheStack.empty()) {
617         grResourceTagCacheStack.pop();
618     }
619 }
620 
621 GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
622     if (grResourceTagCacheStack.empty()) {
623         return{};
624     }
625     return grResourceTagCacheStack.top();
626 }
627 
628 std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
629     std::set<GrGpuResourceTag> result;
630     for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
631         auto tag = fNonpurgeableResources[i]->getResourceTag();
632         result.insert(tag);
633     }
634     return result;
635 }
636 
637 // OH ISSUE: get the memory information of the updated pid.
638 void GrResourceCache::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
639 {
640     fUpdatedBytesOfPid.swap(out);
641 }
642 
643 // OH ISSUE: init gpu memory limit.
644 void GrResourceCache::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
645 {
646     if (fMemoryOverflowCallback_ == nullptr) {
647         fMemoryOverflowCallback_ = callback;
648         fMemoryControl_ = size;
649     }
650 }
651 
652 // OH ISSUE: check whether the PID is abnormal.
653 bool GrResourceCache::isPidAbnormal() const
654 {
655     return fExitedPid_.find(getCurrentGrResourceTag().fPid) != fExitedPid_.end();
656 }
657 
658 // OH ISSUE: update the per-pid byte counts when the resource tag changes.
659 void GrResourceCache::changeByteOfPid(int32_t beforePid, int32_t afterPid,
660     size_t bytes, bool beforeRealAlloc, bool afterRealAlloc)
661 {
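    // Move the tracked bytes from the old pid's tally to the new pid's, mirroring the
    // per-pid bookkeeping done in insertResource()/removeResource().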
662     if (beforePid && beforeRealAlloc) {
663         auto& pidSize = fBytesOfPid[beforePid];
664         pidSize -= bytes;
665         fUpdatedBytesOfPid[beforePid] = pidSize;
666         if (pidSize == 0) {
667             fBytesOfPid.erase(beforePid);
668         }
669     }
670     if (afterPid && afterRealAlloc) {
671         auto& size = fBytesOfPid[afterPid];
672         size += bytes;
673         fUpdatedBytesOfPid[afterPid] = size;
674     }
675 }
676 
677 void GrResourceCache::refResource(GrGpuResource* resource) {
678     SkASSERT(resource);
679     SkASSERT(resource->getContext()->priv().getResourceCache() == this);
680     if (resource->cacheAccess().hasRef()) {
681         resource->ref();
682     } else {
683         this->refAndMakeResourceMRU(resource);
684     }
685     this->validate();
686 }
687 
688 GrGpuResource* GrResourceCache::findAndRefScratchResource(const skgpu::ScratchKey& scratchKey) {
689     SkASSERT(scratchKey.isValid());
690 
691     GrGpuResource* resource = fScratchMap.find(scratchKey);
692     if (resource) {
693         fScratchMap.remove(scratchKey, resource);
694         this->refAndMakeResourceMRU(resource);
695         this->validate();
696     }
697     return resource;
698 }
699 
700 void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
701     ASSERT_SINGLE_OWNER
702     SkASSERT(resource->resourcePriv().getScratchKey().isValid());
703     if (resource->cacheAccess().isUsableAsScratch()) {
704         fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
705     }
706 }
707 
708 void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
709     ASSERT_SINGLE_OWNER
710     // Someone has a ref to this resource in order to have removed the key. When the ref count
711     // reaches zero we will get a ref cnt notification and figure out what to do with it.
712     if (resource->getUniqueKey().isValid()) {
713         SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
714         fUniqueHash.remove(resource->getUniqueKey());
715     }
716     resource->cacheAccess().removeUniqueKey();
717     if (resource->cacheAccess().isUsableAsScratch()) {
718         fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
719     }
720 
721     // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
722     // require purging. However, the resource must be ref'ed to get here and therefore can't
723     // be purgeable. We'll purge it when the refs reach zero.
724     SkASSERT(!resource->resourcePriv().isPurgeable());
725     this->validate();
726 }
727 
728 void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
729     ASSERT_SINGLE_OWNER
730     SkASSERT(resource);
731     SkASSERT(this->isInCache(resource));
732 
733     // If another resource has the new key, remove its key then install the key on this resource.
734     if (newKey.isValid()) {
735         if (GrGpuResource* old = fUniqueHash.find(newKey)) {
736             // If the old resource using the key is purgeable and is unreachable, then remove it.
737             if (!old->resourcePriv().getScratchKey().isValid() &&
738                 old->resourcePriv().isPurgeable()) {
739                 old->cacheAccess().release();
740             } else {
741                 // removeUniqueKey expects an external owner of the resource.
742                 this->removeUniqueKey(sk_ref_sp(old).get());
743             }
744         }
745         SkASSERT(nullptr == fUniqueHash.find(newKey));
746 
747         // Remove the entry for this resource if it already has a unique key.
748         if (resource->getUniqueKey().isValid()) {
749             SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
750             fUniqueHash.remove(resource->getUniqueKey());
751             SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
752         } else {
753             // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
754             // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
755             // unique key until after this check.
756             if (resource->cacheAccess().isUsableAsScratch()) {
757                 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
758             }
759         }
760 
761         resource->cacheAccess().setUniqueKey(newKey);
762         fUniqueHash.add(resource);
763     } else {
764         this->removeUniqueKey(resource);
765     }
766 
767     this->validate();
768 }
769 
770 void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
771     ASSERT_SINGLE_OWNER
772     SkASSERT(resource);
773     SkASSERT(this->isInCache(resource));
774 
775     if (resource->resourcePriv().isPurgeable()) {
776         // It's about to become unpurgeable.
777         fPurgeableBytes -= resource->gpuMemorySize();
778         fPurgeableQueue.remove(resource);
779         this->addToNonpurgeableArray(resource);
780     } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
781                resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
782         SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
783         fNumBudgetedResourcesFlushWillMakePurgeable--;
784     }
785     resource->cacheAccess().ref();
786 
787     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
788     this->validate();
789 }
790 
791 void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
792                                                GrGpuResource::LastRemovedRef removedRef) {
793     ASSERT_SINGLE_OWNER
794     SkASSERT(resource);
795     SkASSERT(!resource->wasDestroyed());
796     SkASSERT(this->isInCache(resource));
797     // This resource should always be in the nonpurgeable array when this function is called. It
798     // will be moved to the queue if it is newly purgeable.
799     SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
800 
801     if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
802         if (resource->cacheAccess().isUsableAsScratch()) {
803             fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
804         }
805     }
806 
807     if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
808         this->validate();
809         return;
810     }
811 
812 #ifdef SK_DEBUG
813     // When the timestamp overflows validate() is called. validate() checks that resources in
814     // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
815     // the purgeable queue happens just below in this function. So we mark it as an exception.
816     if (resource->resourcePriv().isPurgeable()) {
817         fNewlyPurgeableResourceForValidation = resource;
818     }
819 #endif
820     resource->cacheAccess().setTimestamp(this->getNextTimestamp());
821     SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);
822 
823     if (!resource->resourcePriv().isPurgeable() &&
824         resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
825         ++fNumBudgetedResourcesFlushWillMakePurgeable;
826     }
827 
828     if (!resource->resourcePriv().isPurgeable()) {
829         this->validate();
830         return;
831     }
832 
833     this->removeFromNonpurgeableArray(resource);
834     fPurgeableQueue.insert(resource);
835     resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
836     fPurgeableBytes += resource->gpuMemorySize();
837 
838     bool hasUniqueKey = resource->getUniqueKey().isValid();
839 
840     GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();
841 
842     if (budgetedType == GrBudgetedType::kBudgeted) {
843         // Purge the resource immediately if we're over budget
844         // Also purge if the resource has neither a valid scratch key nor a unique key.
845         bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
846         if (!this->overBudget() && hasKey) {
847             return;
848         }
849     } else {
850         // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
851         // they can be reused again by the image connected to the unique key.
852         if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
853             return;
854         }
855         // Check whether this resource could still be used as a scratch resource.
856         if (!resource->resourcePriv().refsWrappedObjects() &&
857             resource->resourcePriv().getScratchKey().isValid()) {
858             // We won't purge an existing resource to make room for this one.
859             if (this->wouldFit(resource->gpuMemorySize())) {
860                 resource->resourcePriv().makeBudgeted();
861                 return;
862             }
863         }
864     }
865 
866     SkDEBUGCODE(int beforeCount = this->getResourceCount();)
867     resource->cacheAccess().release();
868     // We should at least free this resource, perhaps dependent resources as well.
869     SkASSERT(this->getResourceCount() < beforeCount);
870     this->validate();
871 }
872 
873 void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
874     ASSERT_SINGLE_OWNER
875     SkASSERT(resource);
876     SkASSERT(this->isInCache(resource));
877 
878     size_t size = resource->gpuMemorySize();
879     // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
880     // resource become purgeable. However, we should never allow that transition. Wrapped
881     // resources are the only resources that can be in that state and they aren't allowed to
882     // transition from one budgeted state to another.
883     SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
884     if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
885         ++fBudgetedCount;
886         fBudgetedBytes += size;
887 #if GR_CACHE_STATS
888         fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
889         fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
890 #endif
891         if (!resource->resourcePriv().isPurgeable() &&
892             !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
893             ++fNumBudgetedResourcesFlushWillMakePurgeable;
894         }
895         if (resource->cacheAccess().isUsableAsScratch()) {
896             fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
897         }
898         this->purgeAsNeeded();
899     } else {
900         SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
901 #ifdef SKIA_OHOS
902         GrPerfMonitorReporter::GetInstance().recordTextureCache(resource->getResourceTag().fName);
903 #endif
904         --fBudgetedCount;
905         fBudgetedBytes -= size;
906         if (!resource->resourcePriv().isPurgeable() &&
907             !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
908             --fNumBudgetedResourcesFlushWillMakePurgeable;
909         }
910         if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
911             resource->resourcePriv().getScratchKey().isValid()) {
912             fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
913         }
914     }
915     SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
916     TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
917                    fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
918 
919     this->validate();
920 }
921 
922 void GrResourceCache::purgeAsNeeded() {
923     TArray<skgpu::UniqueKeyInvalidatedMessage> invalidKeyMsgs;
924     fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
925     if (!invalidKeyMsgs.empty()) {
926         SkASSERT(fProxyProvider);
927 
928         for (int i = 0; i < invalidKeyMsgs.size(); ++i) {
929             if (invalidKeyMsgs[i].inThreadSafeCache()) {
930                 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
931                 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
932             } else {
933                 fProxyProvider->processInvalidUniqueKey(
934                                                     invalidKeyMsgs[i].key(), nullptr,
935                                                     GrProxyProvider::InvalidateGPUResource::kYes);
936                 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
937             }
938         }
939     }
940 
941     this->processFreedGpuResources();
942 
943     bool stillOverbudget = this->overBudget();
944     while (stillOverbudget && fPurgeableQueue.count()) {
945         GrGpuResource* resource = fPurgeableQueue.peek();
946         SkASSERT(resource->resourcePriv().isPurgeable());
947         resource->cacheAccess().release();
948         stillOverbudget = this->overBudget();
949     }
950 
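    // Still over budget after draining the purgeable queue: drop the thread-safe cache's
    // unique refs (which may make more resources purgeable) and purge again.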
951     if (stillOverbudget) {
952         fThreadSafeCache->dropUniqueRefs(this);
953 
954         stillOverbudget = this->overBudget();
955         while (stillOverbudget && fPurgeableQueue.count()) {
956             GrGpuResource* resource = fPurgeableQueue.peek();
957             SkASSERT(resource->resourcePriv().isPurgeable());
958             resource->cacheAccess().release();
959             stillOverbudget = this->overBudget();
960         }
961     }
962 
963     this->validate();
964 }
965 
966 void GrResourceCache::purgeUnlockedResources(const skgpu::StdSteadyClock::time_point* purgeTime,
967                                              GrPurgeResourceOptions opts) {
968 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
969     SimpleCacheInfo simpleCacheInfo;
970     traceBeforePurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
971 #endif
972     if (opts == GrPurgeResourceOptions::kAllResources) {
973         if (purgeTime) {
974             fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
975         } else {
976             fThreadSafeCache->dropUniqueRefs(nullptr);
977         }
978 
979         // We could disable maintaining the heap property here, but it would add a lot of
980         // complexity. Moreover, this is rarely called.
981         while (fPurgeableQueue.count()) {
982             GrGpuResource* resource = fPurgeableQueue.peek();
983 
984             const skgpu::StdSteadyClock::time_point resourceTime =
985                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
986             if (purgeTime && resourceTime >= *purgeTime) {
987                 // Resources were given both LRU timestamps and tagged with a frame number when
988                 // they first became purgeable. The LRU timestamp won't change again until the
989                 // resource is made non-purgeable again. So, at this point all the remaining
990                 // resources in the timestamp-sorted queue will have a frame number >= to this
991                 // one.
992                 break;
993             }
994 
995             SkASSERT(resource->resourcePriv().isPurgeable());
996             resource->cacheAccess().release();
997         }
998     } else {
999         SkASSERT(opts == GrPurgeResourceOptions::kScratchResourcesOnly);
1000         // Early out if the very first item is too new to purge to avoid sorting the queue when
1001         // nothing will be deleted.
1002         if (purgeTime && fPurgeableQueue.count() &&
1003             fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
1004 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1005             traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1006 #endif
1007             return;
1008         }
1009 
1010         // Sort the queue
1011         fPurgeableQueue.sort();
1012 
1013         // Make a list of the scratch resources to delete
1014         SkTDArray<GrGpuResource*> scratchResources;
1015         for (int i = 0; i < fPurgeableQueue.count(); i++) {
1016             GrGpuResource* resource = fPurgeableQueue.at(i);
1017 
1018             const skgpu::StdSteadyClock::time_point resourceTime =
1019                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
1020             if (purgeTime && resourceTime >= *purgeTime) {
1021                 // scratch or not, all later iterations will be too recently used to purge.
1022                 break;
1023             }
1024             SkASSERT(resource->resourcePriv().isPurgeable());
1025             if (!resource->getUniqueKey().isValid()) {
1026                 *scratchResources.append() = resource;
1027             }
1028         }
1029 
1030         // Delete the scratch resources. This must be done as a separate pass
1031         // to avoid messing up the sorted order of the queue
1032         for (int i = 0; i < scratchResources.size(); i++) {
1033             scratchResources[i]->cacheAccess().release();
1034         }
1035     }
1036 
1037     this->validate();
1038 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1039     traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1040 #endif
1041 }
1042 
1043 void GrResourceCache::purgeUnlockedResourcesByPid(bool scratchResourceOnly, const std::set<int>& exitedPidSet) {
1044 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1045     SimpleCacheInfo simpleCacheInfo;
1046     traceBeforePurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1047 #endif
1048     // Sort the queue
1049     fPurgeableQueue.sort();
1050 
1051     // Make lists of the resources that need to be purged
1052     fThreadSafeCache->dropUniqueRefs(nullptr);
1053     SkTDArray<GrGpuResource*> exitPidResources;
1054     SkTDArray<GrGpuResource*> scratchResources;
1055     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1056         GrGpuResource* resource = fPurgeableQueue.at(i);
1057         SkASSERT(resource->resourcePriv().isPurgeable());
1058         if (exitedPidSet.count(resource->getResourceTag().fPid)) {
1059             *exitPidResources.append() = resource;
1060         } else if (!resource->getUniqueKey().isValid()) {
1061             *scratchResources.append() = resource;
1062         }
1063     }
1064 
1065     // Delete the exited-pid and scratch resources. This must be done as a separate pass
1066     // to avoid messing up the sorted order of the queue.
1067     for (int i = 0; i < exitPidResources.size(); i++) {
1068         exitPidResources[i]->cacheAccess().release();
1069     }
1070     for (int i = 0; i < scratchResources.size(); i++) {
1071         scratchResources[i]->cacheAccess().release();
1072     }
1073 
1074     for (auto pid : exitedPidSet) {
1075         fExitedPid_.erase(pid);
1076     }
1077 
1078     this->validate();
1079 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1080     traceAfterPurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1081 #endif
1082 }
1083 
1084 void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
1085     // Sort the queue
1086     fPurgeableQueue.sort();
1087 
1088     // Make a list of the scratch resources to delete
1089     SkTDArray<GrGpuResource*> scratchResources;
1090     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1091         GrGpuResource* resource = fPurgeableQueue.at(i);
1092         SkASSERT(resource->resourcePriv().isPurgeable());
1093         if (tag.filter(resource->getResourceTag()) && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1094             *scratchResources.append() = resource;
1095         }
1096     }
1097 
1098     // Delete the scratch resources. This must be done as a separate pass
1099     // to avoid messing up the sorted order of the queue.
1100     for (int i = 0; i < scratchResources.size(); i++) {
1101         scratchResources[i]->cacheAccess().release();
1102     }
1103 
1104     this->validate();
1105 }
1106 
1107 bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
1108     AutoValidate av(this);
1109     if (desiredHeadroomBytes > fMaxBytes) {
1110         return false;
1111     }
1112     if (this->wouldFit(desiredHeadroomBytes)) {
1113         return true;
1114     }
1115     fPurgeableQueue.sort();
1116 
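    // Walk the purgeable queue in LRU order and find how many resources must be released
    // for the requested headroom to fit under fMaxBytes.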
1117     size_t projectedBudget = fBudgetedBytes;
1118     int purgeCnt = 0;
1119     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1120         GrGpuResource* resource = fPurgeableQueue.at(i);
1121         if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1122             projectedBudget -= resource->gpuMemorySize();
1123         }
1124         if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
1125             purgeCnt = i + 1;
1126             break;
1127         }
1128     }
1129     if (purgeCnt == 0) {
1130         return false;
1131     }
1132 
1133     // Success! Release the resources.
1134     // Copy to array first so we don't mess with the queue.
1135     std::vector<GrGpuResource*> resources;
1136     resources.reserve(purgeCnt);
1137     for (int i = 0; i < purgeCnt; i++) {
1138         resources.push_back(fPurgeableQueue.at(i));
1139     }
1140     for (GrGpuResource* resource : resources) {
1141         resource->cacheAccess().release();
1142     }
1143     return true;
1144 }
1145 
1146 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
1147 
1148     const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
1149     bool stillOverbudget = tmpByteBudget < fBytes;
1150 
1151     if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
1152         // Sort the queue
1153         fPurgeableQueue.sort();
1154 
1155         // Make a list of the scratch resources to delete
1156         SkTDArray<GrGpuResource*> scratchResources;
1157         size_t scratchByteCount = 0;
1158         for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
1159             GrGpuResource* resource = fPurgeableQueue.at(i);
1160             SkASSERT(resource->resourcePriv().isPurgeable());
1161             if (!resource->getUniqueKey().isValid()) {
1162                 *scratchResources.append() = resource;
1163                 scratchByteCount += resource->gpuMemorySize();
1164                 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
1165             }
1166         }
1167 
1168         // Delete the scratch resources. This must be done as a separate pass
1169         // to avoid messing up the sorted order of the queue
1170         for (int i = 0; i < scratchResources.size(); i++) {
1171             scratchResources[i]->cacheAccess().release();
1172         }
1173         stillOverbudget = tmpByteBudget < fBytes;
1174 
1175         this->validate();
1176     }
1177 
1178     // Purge any remaining resources in LRU order
1179     if (stillOverbudget) {
1180         const size_t cachedByteCount = fMaxBytes;
1181         fMaxBytes = tmpByteBudget;
1182         this->purgeAsNeeded();
1183         fMaxBytes = cachedByteCount;
1184     }
1185 }
1186 
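// A flush is requested only when the cache is over budget, nothing is purgeable right now,
// and flushing would make at least one budgeted resource purgeable.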
1187 bool GrResourceCache::requestsFlush() const {
1188     return this->overBudget() && !fPurgeableQueue.count() &&
1189            fNumBudgetedResourcesFlushWillMakePurgeable > 0;
1190 }
1191 
1192 void GrResourceCache::processFreedGpuResources() {
1193     TArray<UnrefResourceMessage> msgs;
1194     fUnrefResourceInbox.poll(&msgs);
1195     // We don't need to do anything other than let the messages delete themselves and call unref.
1196 }
1197 
1198 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
1199     int index = fNonpurgeableResources.size();
1200     *fNonpurgeableResources.append() = resource;
1201     *resource->cacheAccess().accessCacheIndex() = index;
1202 }
1203 
1204 void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
1205     int* index = resource->cacheAccess().accessCacheIndex();
1206     // Fill the hole we will create in the array with the tail object, adjust its index, and
1207     // then pop the array
1208     GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
1209     SkASSERT(fNonpurgeableResources[*index] == resource);
1210     fNonpurgeableResources[*index] = tail;
1211     *tail->cacheAccess().accessCacheIndex() = *index;
1212     fNonpurgeableResources.pop_back();
1213     SkDEBUGCODE(*index = -1);
1214 }
1215 
1216 uint32_t GrResourceCache::getNextTimestamp() {
1217     // If we wrap then all the existing resources will appear older than any resources that get
1218     // a timestamp after the wrap.
1219     if (0 == fTimestamp) {
1220         int count = this->getResourceCount();
1221         if (count) {
1222             // Reset all the timestamps. We sort the resources by timestamp and then assign
1223             // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
1224             // rare.
1225             SkTDArray<GrGpuResource*> sortedPurgeableResources;
1226             sortedPurgeableResources.reserve(fPurgeableQueue.count());
1227 
1228             while (fPurgeableQueue.count()) {
1229                 *sortedPurgeableResources.append() = fPurgeableQueue.peek();
1230                 fPurgeableQueue.pop();
1231             }
1232 
            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.size() &&
                   currNP < fNonpurgeableResources.size()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.size()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.size()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.size(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}

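// OHOS DFX helper: streams per-resource info for every VkImage-backed resource, first from the
// purgeable queue and then from the non-purgeable array, followed by the destroy record when
// Vulkan is enabled.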
#ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
void GrResourceCache::dumpAllResource(std::stringstream &dump) const {
    if (getResourceCount() == 0) {
        return;
    }
    dump << "Purgeable: " << fPurgeableQueue.count() << std::endl;
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (resource == nullptr) {
            continue;
        }
        if (strcmp(resource->getResourceType(), "VkImage") != 0) {
            continue;
        }
        resource->dumpVkImageInfo(dump);
    }
    dump << "Non-Purgeable: " << fNonpurgeableResources.size() << std::endl;
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (resource == nullptr) {
            continue;
        }
        if (strcmp(resource->getResourceType(), "VkImage") != 0) {
            continue;
        }
        resource->dumpVkImageInfo(dump);
    }
#ifdef SK_VULKAN
    dump << "Destroy Record: " << std::endl;
    ParallelDebug::DumpAllDestroyVkImage(dump);
#endif
}
#endif

void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}

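// Tag-filtered variant: only resources whose tag passes tag.filter() are dumped.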
void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, GrGpuResourceTag& tag) const {
    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
            fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
            fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
        }
    }
}

#if GR_CACHE_STATS
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.size();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}

#if defined(GPU_TEST_UTILS)
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}

void GrResourceCache::dumpStatsKeyValuePairs(TArray<SkString>* keys,
                                             TArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
#endif // defined(GPU_TEST_UTILS)
#endif // GR_CACHE_STATS

#ifdef SK_DEBUG
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
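    // mask is one less than a power of two that scales with fCount (roughly fCount/32), and the
    // full walk only runs when the random draw ANDs to zero. Small caches are therefore always
    // validated, while a cache of N resources is validated on roughly 32/N of the calls.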
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const skgpu::ScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const skgpu::UniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}

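// Debug-only membership check: a resource's cached index is only meaningful for whichever
// container currently holds it, so both the purgeable queue and the non-purgeable array are
// consulted before concluding the index is stale.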
bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
    int index = *resource->cacheAccess().accessCacheIndex();
    if (index < 0) {
        return false;
    }
    if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
        return true;
    }
    if (index < fNonpurgeableResources.size() && fNonpurgeableResources[index] == resource) {
        return true;
    }
    SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
    return false;
}

#endif // SK_DEBUG

#if defined(GPU_TEST_UTILS)

int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
    int count = 0;
    fUniqueHash.foreach([&](const GrGpuResource& resource){
        if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
            ++count;
        }
    });
    return count;
}

void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}

void GrResourceCache::visitSurfaces(
        const std::function<void(const GrSurface*, bool purgeable)>& func) const {

    for (int i = 0; i < fNonpurgeableResources.size(); ++i) {
        if (const GrSurface* surf = fNonpurgeableResources[i]->asSurface()) {
            func(surf, /* purgeable= */ false);
        }
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        if (const GrSurface* surf = fPurgeableQueue.at(i)->asSurface()) {
            func(surf, /* purgeable= */ true);
        }
    }
}

#endif // defined(GPU_TEST_UTILS)