/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include <ctime>
#include <vector>
#include <map>
#include <sstream>
#ifdef NOT_BUILD_FOR_OHOS_SDK
#include <parameters.h>
#endif
#include "include/core/SkString.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#ifdef SKIA_OHOS
#include "src/gpu/GrPerfMonitorReporter.h"
#endif
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

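// TextureAwaitingUnref pairs a GrTexture with the number of unrefs it still owes. It is
// move-only: moving transfers both the texture pointer and the outstanding unref count, and
// the destructor applies any unrefs that were never delivered individually.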
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}

inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
#ifdef NOT_BUILD_FOR_OHOS_SDK
    static int overtimeDuration = std::atoi(
            OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_overtime", "600")
                    .c_str());
    static double maxBytesRate = std::atof(
            OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_max_rate", "0.9")
                    .c_str());
#else
    static int overtimeDuration = 600;
    static double maxBytesRate = 0.9;
#endif
    fMaxBytesRate = maxBytesRate;
    fOvertimeDuration = overtimeDuration;
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

#ifdef SKIA_DFX_FOR_OHOS
static constexpr int MB = 1024 * 1024;

#ifdef SKIA_OHOS
bool GrResourceCache::purgeUnlocakedResTraceEnabled_ =
    std::atoi((OHOS::system::GetParameter("sys.graphic.skia.cache.debug", "0").c_str())) == 1;
#endif

void GrResourceCache::dumpInfo(SkString* out) {
    if (out == nullptr) {
        SkDebugf("OHOS GrResourceCache::dumpInfo outPtr is nullptr!");
        return;
    }
    auto info = cacheInfo();
    constexpr uint8_t STEP_INDEX = 1;
    SkTArray<SkString> lines;
    SkStrSplit(info.substr(STEP_INDEX, info.length() - STEP_INDEX).c_str(), ";", &lines);
    for (int i = 0; i < lines.size(); ++i) {
        out->appendf("    %s\n", lines[i].c_str());
    }
}

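// OH-specific debug helper: builds a single semicolon-separated summary string covering the
// purgeable queue, the nonpurgeable array, budgeted/total byte counters (reported in bytes and
// MB), and the per-pid byte map. dumpInfo() above splits this string on ';' for line-based output.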
std::string GrResourceCache::cacheInfo()
{
    auto fPurgeableQueueInfoStr = cacheInfoPurgeableQueue();
    auto fNonpurgeableResourcesInfoStr = cacheInfoNoPurgeableQueue();
    size_t fRealAllocBytes = cacheInfoRealAllocSize();
    auto fRealAllocInfoStr = cacheInfoRealAllocQueue();
    auto fRealBytesOfPidInfoStr = realBytesOfPid();

    std::ostringstream cacheInfoStream;
    cacheInfoStream << "[fPurgeableQueueInfoStr.count : " << fPurgeableQueue.count()
        << "; fNonpurgeableResources.count : " << fNonpurgeableResources.count()
        << "; fBudgetedBytes : " << fBudgetedBytes
        << "(" << static_cast<size_t>(fBudgetedBytes / MB)
        << " MB) / " << fMaxBytes
        << "(" << static_cast<size_t>(fMaxBytes / MB)
        << " MB); fBudgetedCount : " << fBudgetedCount
        << "; fBytes : " << fBytes
        << "(" << static_cast<size_t>(fBytes / MB)
        << " MB); fPurgeableBytes : " << fPurgeableBytes
        << "(" << static_cast<size_t>(fPurgeableBytes / MB)
        << " MB); fAllocImageBytes : " << fAllocImageBytes
        << "(" << static_cast<size_t>(fAllocImageBytes / MB)
        << " MB); fAllocBufferBytes : " << fAllocBufferBytes
        << "(" << static_cast<size_t>(fAllocBufferBytes / MB)
        << " MB); fRealAllocBytes : " << fRealAllocBytes
        << "(" << static_cast<size_t>(fRealAllocBytes / MB)
        << " MB); fTimestamp : " << fTimestamp
        << "; " << fPurgeableQueueInfoStr << "; " << fNonpurgeableResourcesInfoStr
        << "; " << fRealAllocInfoStr << "; " << fRealBytesOfPidInfoStr;
    return cacheInfoStream.str();
}

#ifdef SKIA_OHOS
void GrResourceCache::traceBeforePurgeUnlockRes(const std::string& method, SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        StartTrace(HITRACE_TAG_GRAPHIC_AGP, method + " begin cacheInfo = " + cacheInfo());
    } else {
        simpleCacheInfo.fPurgeableQueueCount = fPurgeableQueue.count();
        simpleCacheInfo.fNonpurgeableResourcesCount = fNonpurgeableResources.count();
        simpleCacheInfo.fPurgeableBytes = fPurgeableBytes;
        simpleCacheInfo.fBudgetedCount = fBudgetedCount;
        simpleCacheInfo.fBudgetedBytes = fBudgetedBytes;
        simpleCacheInfo.fAllocImageBytes = fAllocImageBytes;
        simpleCacheInfo.fAllocBufferBytes = fAllocBufferBytes;
    }
}

void GrResourceCache::traceAfterPurgeUnlockRes(const std::string& method, const SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s", method.c_str(), cacheInfo().c_str());
        FinishTrace(HITRACE_TAG_GRAPHIC_AGP);
    } else {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s",
            method.c_str(), cacheInfoComparison(simpleCacheInfo).c_str());
    }
}

std::string GrResourceCache::cacheInfoComparison(const SimpleCacheInfo& simpleCacheInfo)
{
    std::ostringstream cacheInfoComparison;
    cacheInfoComparison << "PurgeableCount : " << simpleCacheInfo.fPurgeableQueueCount
        << " / " << fPurgeableQueue.count()
        << "; NonpurgeableCount : " << simpleCacheInfo.fNonpurgeableResourcesCount
        << " / " << fNonpurgeableResources.count()
        << "; PurgeableBytes : " << simpleCacheInfo.fPurgeableBytes << " / " << fPurgeableBytes
        << "; BudgetedCount : " << simpleCacheInfo.fBudgetedCount << " / " << fBudgetedCount
        << "; BudgetedBytes : " << simpleCacheInfo.fBudgetedBytes << " / " << fBudgetedBytes
        << "; AllocImageBytes : " << simpleCacheInfo.fAllocImageBytes << " / " << fAllocImageBytes
        << "; AllocBufferBytes : " << simpleCacheInfo.fAllocBufferBytes << " / " << fAllocBufferBytes;
    return cacheInfoComparison.str();
}
#endif // SKIA_OHOS

std::string GrResourceCache::cacheInfoPurgeableQueue()
{
    std::map<uint32_t, size_t> purgSizeInfoWid;
    std::map<uint32_t, int> purgCountInfoWid;
    std::map<uint32_t, std::string> purgNameInfoWid;
    std::map<uint32_t, int> purgPidInfoWid;

    std::map<uint32_t, size_t> purgSizeInfoPid;
    std::map<uint32_t, int> purgCountInfoPid;
    std::map<uint32_t, std::string> purgNameInfoPid;

    std::map<uint32_t, size_t> purgSizeInfoFid;
    std::map<uint32_t, int> purgCountInfoFid;
    std::map<uint32_t, std::string> purgNameInfoFid;

    int purgCountUnknown = 0;
    size_t purgSizeUnknown = 0;

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (!IsValidAddress(resource)) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
        } else {
            purgCountUnknown++;
            purgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (purgSizeInfoWid.size() > 0) {
        infoStr += ";PurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
    }
    if (purgSizeInfoPid.size() > 0) {
        infoStr += ";PurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
    }
    if (purgSizeInfoFid.size() > 0) {
        infoStr += ";PurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";PurgeableInfo_Unknown:", purgCountUnknown, purgSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::cacheInfoNoPurgeableQueue()
{
    std::map<uint32_t, size_t> noPurgSizeInfoWid;
    std::map<uint32_t, int> noPurgCountInfoWid;
    std::map<uint32_t, std::string> noPurgNameInfoWid;
    std::map<uint32_t, int> noPurgPidInfoWid;

    std::map<uint32_t, size_t> noPurgSizeInfoPid;
    std::map<uint32_t, int> noPurgCountInfoPid;
    std::map<uint32_t, std::string> noPurgNameInfoPid;

    std::map<uint32_t, size_t> noPurgSizeInfoFid;
    std::map<uint32_t, int> noPurgCountInfoFid;
    std::map<uint32_t, std::string> noPurgNameInfoFid;

    int noPurgCountUnknown = 0;
    size_t noPurgSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
        } else {
            noPurgCountUnknown++;
            noPurgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (noPurgSizeInfoWid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
    }
    if (noPurgSizeInfoPid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
    }
    if (noPurgSizeInfoFid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";NonPurgeableInfo_Unknown:", noPurgCountUnknown, noPurgSizeUnknown);
    return infoStr;
}

size_t GrResourceCache::cacheInfoRealAllocSize()
{
    size_t realAllocImageSize = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !IsValidAddress(resource) || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    return realAllocImageSize;
}

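// Aggregates only resources flagged isRealAlloc(), walking both the nonpurgeable array and the
// purgeable queue, and groups them by window id (fWid), pid, or type id (fFid) from the resource tag.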
std::string GrResourceCache::cacheInfoRealAllocQueue()
{
    std::map<uint32_t, std::string> realAllocNameInfoWid;
    std::map<uint32_t, size_t> realAllocSizeInfoWid;
    std::map<uint32_t, int> realAllocPidInfoWid;
    std::map<uint32_t, int> realAllocCountInfoWid;

    std::map<uint32_t, std::string> realAllocNameInfoPid;
    std::map<uint32_t, size_t> realAllocSizeInfoPid;
    std::map<uint32_t, int> realAllocCountInfoPid;

    std::map<uint32_t, std::string> realAllocNameInfoFid;
    std::map<uint32_t, size_t> realAllocSizeInfoFid;
    std::map<uint32_t, int> realAllocCountInfoFid;

    int realAllocCountUnknown = 0;
    size_t realAllocSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !IsValidAddress(resource) || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    std::string infoStr;
    if (realAllocSizeInfoWid.size() > 0) {
        infoStr += ";RealAllocInfo_Node:[";
        updatePurgeableWidInfo(infoStr, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
    }
    if (realAllocSizeInfoPid.size() > 0) {
        infoStr += ";RealAllocInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
    }
    if (realAllocSizeInfoFid.size() > 0) {
        infoStr += ";RealAllocInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";RealAllocInfo_Unknown:", realAllocCountUnknown, realAllocSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::realBytesOfPid()
{
    std::string infoStr;
    infoStr += ";fBytesOfPid : [";
    if (fBytesOfPid.size() > 0) {
        for (auto it = fBytesOfPid.begin(); it != fBytesOfPid.end(); it++) {
            infoStr += std::to_string(it->first) + ":" + std::to_string(it->second) + ", ";
        }
    }
    infoStr += "]";
    return infoStr;
}

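// The update*Map helpers below accumulate a single resource into per-key maps of size, count,
// tag name, and (for the Wid variants) owning pid. The Purgeable variants charge
// gpuMemorySize(), while the RealAlloc variants charge getRealAllocSize().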
void GrResourceCache::updatePurgeableWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, size_t>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + resource->gpuMemorySize();
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = resource->gpuMemorySize();
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updatePurgeablePidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, size_t>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + resource->gpuMemorySize();
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = resource->gpuMemorySize();
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updatePurgeableFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, size_t>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + resource->gpuMemorySize();
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = resource->gpuMemorySize();
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updateRealAllocWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, size_t>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + size;
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = size;
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updateRealAllocPidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, size_t>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + size;
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = size;
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updateRealAllocFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, size_t>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + size;
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = size;
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updatePurgeableWidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoWid,
                                             std::map<uint32_t, size_t>& sizeInfoWid,
                                             std::map<uint32_t, int>& pidInfoWid,
                                             std::map<uint32_t, int>& countInfoWid)
{
    for (auto it = sizeInfoWid.begin(); it != sizeInfoWid.end(); it++) {
        infoStr += "[" + nameInfoWid[it->first] +
            ",pid=" + std::to_string(pidInfoWid[it->first]) +
            ",NodeId=" + std::to_string(it->first & 0xFFFFFFFF) +
            ",count=" + std::to_string(countInfoWid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeablePidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoPid,
                                             std::map<uint32_t, size_t>& sizeInfoPid,
                                             std::map<uint32_t, int>& countInfoPid)
{
    for (auto it = sizeInfoPid.begin(); it != sizeInfoPid.end(); it++) {
        infoStr += "[" + nameInfoPid[it->first] +
            ",pid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoPid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableFidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoFid,
                                             std::map<uint32_t, size_t>& sizeInfoFid,
                                             std::map<uint32_t, int>& countInfoFid)
{
    for (auto it = sizeInfoFid.begin(); it != sizeInfoFid.end(); it++) {
        infoStr += "[" + nameInfoFid[it->first] +
            ",typeid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoFid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableUnknownInfo(
    std::string& infoStr, const std::string& unknownPrefix, const int countUnknown, const size_t sizeUnknown)
{
    if (countUnknown > 0) {
        infoStr += unknownPrefix +
            "[count=" + std::to_string(countUnknown) +
            ",size=" + std::to_string(sizeUnknown) +
            "(" + std::to_string(sizeUnknown / MB) + "MB)]";
    }
}
#endif

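// OH note: in addition to the upstream bookkeeping, insertResource tracks per-pid byte totals for
// real allocations and, once a pid crosses fMemoryControl_, fires fMemoryOverflowCallback_ a
// single time for that pid (tracked via fExitedPid_).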
void GrResourceCache::insertResource(GrGpuResource* resource)
{
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize += size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize >= fMemoryControl_ && fExitedPid_.find(pid) == fExitedPid_.end() && fMemoryOverflowCallback_) {
            fMemoryOverflowCallback_(pid, pidSize, true);
            fExitedPid_.insert(pid);
            SkDebugf("OHOS resource overflow! pid[%{public}d], size[%{public}zu]", pid, pidSize);
#ifdef SKIA_OHOS
            HITRACE_OHOS_NAME_FMT_ALWAYS("OHOS gpu resource overflow: pid(%u), size:(%u)", pid, pidSize);
#endif
        }
    }

#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
#ifdef SKIA_OHOS
    if (fBudgetedBytes >= fMaxBytes) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("cache over fBudgetedBytes:(%u), fMaxBytes:(%u)", fBudgetedBytes, fMaxBytes);
#ifdef SKIA_DFX_FOR_OHOS
        SimpleCacheInfo simpleCacheInfo;
        traceBeforePurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
        this->purgeAsNeeded();
#ifdef SKIA_DFX_FOR_OHOS
        traceAfterPurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
    } else {
        this->purgeAsNeeded();
    }
#else
    this->purgeAsNeeded();
#endif
}

void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (!IsValidAddress(resource)) {
        return;
    }
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize -= size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(pid);
        }
    }

    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        if (IsValidAddress(top)) {
            top->cacheAccess().abandon();
        } else {
            fPurgeableQueue.pop();
        }
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        if (IsValidAddress(top)) {
            top->cacheAccess().release();
        } else {
            fPurgeableQueue.pop();
        }
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

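// OH-specific: releases every cached resource whose tag matches 'tag'. Matching resources are
// first collected from both the nonpurgeable array and the purgeable queue, their unique keys are
// invalidated through the proxy provider, and only then are they released.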
void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
    AutoValidate av(this);
    this->processFreedGpuResources();
    SkASSERT(fProxyProvider); // better have called setProxyProvider
    std::vector<GrGpuResource*> recycleVector;
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (IsValidAddress(resource) && tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (auto resource : recycleVector) {
        SkASSERT(!resource->wasDestroyed());
        resource->cacheAccess().release();
    }
}

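// OH-specific tag stack: setCurrentGrResourceTag pushes a valid tag, while an invalid tag (or an
// explicit popGrResourceTag call) pops the most recent entry. getCurrentGrResourceTag returns the
// top of the stack, or a default-constructed tag when the stack is empty.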
void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (tag.isGrTagValid()) {
        grResourceTagCacheStack.push(tag);
        return;
    }
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

void GrResourceCache::popGrResourceTag()
{
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
    if (grResourceTagCacheStack.empty()) {
        return {};
    }
    return grResourceTagCacheStack.top();
}

std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
    std::set<GrGpuResourceTag> result;
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        auto tag = fNonpurgeableResources[i]->getResourceTag();
        result.insert(tag);
    }
    return result;
}

#ifdef SKIA_OHOS
// OH ISSUE: set purgeable resource max count limit.
void GrResourceCache::setPurgeableResourceLimit(int purgeableMaxCount)
{
    fPurgeableMaxCount = purgeableMaxCount;
}
#endif

// OH ISSUE: get the memory information of the updated pids.
void GrResourceCache::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    fUpdatedBytesOfPid.swap(out);
}

// OH ISSUE: init gpu memory limit.
void GrResourceCache::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fMemoryOverflowCallback_ == nullptr) {
        fMemoryOverflowCallback_ = callback;
        fMemoryControl_ = size;
    }
}

// OH ISSUE: check whether the PID is abnormal.
bool GrResourceCache::isPidAbnormal() const
{
    return fExitedPid_.find(getCurrentGrResourceTag().fPid) != fExitedPid_.end();
}

// OH ISSUE: update fBytesOfPid when the resource tag changes.
void GrResourceCache::changeByteOfPid(int32_t beforePid, int32_t afterPid,
    size_t bytes, bool beforeRealAlloc, bool afterRealAlloc)
{
    if (beforePid && beforeRealAlloc) {
        auto& pidSize = fBytesOfPid[beforePid];
        pidSize -= bytes;
        fUpdatedBytesOfPid[beforePid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(beforePid);
        }
    }
    if (afterPid && afterRealAlloc) {
        auto& size = fBytesOfPid[afterPid];
        size += bytes;
        fUpdatedBytesOfPid[afterPid] = size;
    }
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};

GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));
    if (!IsValidAddress(resource)) {
        return;
    }
    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
#ifdef SKIA_OHOS
        GrPerfMonitorReporter::GetInstance().recordTextureCache(resource->getResourceTag().fName);
#endif
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

static constexpr int timeUnit = 1000;

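// Frame-aware purge throttle: purging is always allowed outside a frame (unless the next frame
// has already arrived), is re-allowed when a new frame starts, and is otherwise cut off once
// fOvertimeDuration microseconds or more have elapsed since the frame began (timeUnit converts
// seconds and nanoseconds to microseconds).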
// OH ISSUE: allow access to release interface
bool GrResourceCache::allowToPurge(const std::function<bool(void)>& nextFrameHasArrived)
{
    if (!fEnabled) {
        return true;
    }
    if (fFrameInfo.duringFrame == 0) {
        if (nextFrameHasArrived && nextFrameHasArrived()) {
            return false;
        }
        return true;
    }
    if (fFrameInfo.frameCount != fLastFrameCount) { // the next frame arrives
        struct timespec startTime = {0, 0};
        if (clock_gettime(CLOCK_REALTIME, &startTime) == -1) {
            return true;
        }
        fStartTime = startTime.tv_sec * timeUnit * timeUnit + startTime.tv_nsec / timeUnit;
        fLastFrameCount = fFrameInfo.frameCount;
        return true;
    }
    struct timespec endTime = {0, 0};
    if (clock_gettime(CLOCK_REALTIME, &endTime) == -1) {
        return true;
    }
    if (((endTime.tv_sec * timeUnit * timeUnit + endTime.tv_nsec / timeUnit) - fStartTime) >= fOvertimeDuration) {
        return false;
    }
    return true;
}

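// purgeAsNeeded first drains pending unique-key invalidation messages and processes freed GPU
// resources, then, while the cache is over budget and allowToPurge() permits, releases resources
// from the front of the purgeable queue; if that is not enough it drops unique refs held by the
// thread-safe cache and purges again.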
purgeAsNeeded(const std::function<bool (void)> & nextFrameHasArrived)1231 void GrResourceCache::purgeAsNeeded(const std::function<bool(void)>& nextFrameHasArrived) {
1232     SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
1233     fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
1234     if (invalidKeyMsgs.count()) {
1235         SkASSERT(fProxyProvider);
1236 
1237         for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
1238             if (invalidKeyMsgs[i].inThreadSafeCache()) {
1239                 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
1240                 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
1241             } else {
1242                 fProxyProvider->processInvalidUniqueKey(
1243                                                     invalidKeyMsgs[i].key(), nullptr,
1244                                                     GrProxyProvider::InvalidateGPUResource::kYes);
1245                 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
1246             }
1247         }
1248     }
1249 
1250     this->processFreedGpuResources();
1251 
1252     bool stillOverbudget = this->overBudget(nextFrameHasArrived);
1253     while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1254         GrGpuResource* resource = fPurgeableQueue.peek();
1255         SkASSERT(resource->resourcePriv().isPurgeable());
1256         if (IsValidAddress(resource)) {
1257             resource->cacheAccess().release();
1258             stillOverbudget = this->overBudget(nextFrameHasArrived);
1259         } else {
1260             fPurgeableQueue.pop();
1261         }
1262     }
1263 
1264     if (stillOverbudget) {
1265         fThreadSafeCache->dropUniqueRefs(this);
1266 
1267         stillOverbudget = this->overBudget(nextFrameHasArrived);
1268         while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1269             GrGpuResource* resource = fPurgeableQueue.peek();
1270             SkASSERT(resource->resourcePriv().isPurgeable());
1271             if (IsValidAddress(resource)) {
1272                 resource->cacheAccess().release();
1273                 stillOverbudget = this->overBudget(nextFrameHasArrived);
1274             } else {
1275                 fPurgeableQueue.pop();
1276             }
1277         }
1278     }
1279 
1280     this->validate();
1281 }
1282 
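// Releases purgeable resources that became purgeable before 'purgeTime' (or all purgeable
// resources when purgeTime is null). When scratchResourcesOnly is true, only resources without
// a valid unique key are released, and they are collected in a separate pass so the sorted
// queue is not disturbed while it is being walked.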
1283 void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
1284                                              bool scratchResourcesOnly) {
1285 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1286     SimpleCacheInfo simpleCacheInfo;
1287     traceBeforePurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1288 #endif
1289     if (!scratchResourcesOnly) {
1290         if (purgeTime) {
1291             fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
1292         } else {
1293             fThreadSafeCache->dropUniqueRefs(nullptr);
1294         }
1295 
1296         // We could disable maintaining the heap property here, but it would add a lot of
1297         // complexity. Moreover, this is rarely called.
1298         while (fPurgeableQueue.count()) {
1299             GrGpuResource* resource = fPurgeableQueue.peek();
1300             if (IsValidAddress(resource)) {
1301                 const GrStdSteadyClock::time_point resourceTime =
1302                         resource->cacheAccess().timeWhenResourceBecamePurgeable();
1303                 if (purgeTime && resourceTime >= *purgeTime) {
1304                     // Resources were given both LRU timestamps and tagged with a frame number when
1305                     // they first became purgeable. The LRU timestamp won't change again until the
1306                     // resource is made non-purgeable again. So, at this point all the remaining
1307                     // resources in the timestamp-sorted queue will have a frame number >= to this
1308                     // one.
1309                     break;
1310                 }
1311 
1312                 SkASSERT(resource->resourcePriv().isPurgeable());
1313                 resource->cacheAccess().release();
1314             } else {
1315                 fPurgeableQueue.pop();
1316             }
1317         }
1318     } else {
1319         // Early out if the very first item is too new to purge to avoid sorting the queue when
1320         // nothing will be deleted.
1321         if (purgeTime && fPurgeableQueue.count() &&
1322             fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
1323 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1324             traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1325 #endif
1326             return;
1327         }
1328 
1329         // Sort the queue
1330         fPurgeableQueue.sort();
1331 
1332         // Make a list of the scratch resources to delete
1333         SkTDArray<GrGpuResource*> scratchResources;
1334         for (int i = 0; i < fPurgeableQueue.count(); i++) {
1335             GrGpuResource* resource = fPurgeableQueue.at(i);
1336             if (!IsValidAddress(resource)) {
1337                 continue;
1338             }
1339 
1340             const GrStdSteadyClock::time_point resourceTime =
1341                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
1342             if (purgeTime && resourceTime >= *purgeTime) {
1343                 // scratch or not, all later iterations will be too recently used to purge.
1344                 break;
1345             }
1346             SkASSERT(resource->resourcePriv().isPurgeable());
1347             if (!resource->getUniqueKey().isValid()) {
1348                 *scratchResources.append() = resource;
1349             }
1350         }
1351 
1352         // Delete the scratch resources. This must be done as a separate pass
1353         // to avoid messing up the sorted order of the queue
1354         for (int i = 0; i < scratchResources.count(); i++) {
1355             scratchResources.getAt(i)->cacheAccess().release();
1356         }
1357     }
1358 
1359     this->validate();
1360 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1361     traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1362 #endif
1363 }
1364 
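// Drops the thread-safe cache's unique refs and then releases every purgeable scratch resource
// (one without a valid unique key), regardless of the current budget or resource age.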
1365 void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
1366 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1367     SimpleCacheInfo simpleCacheInfo;
1368     traceBeforePurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1369 #endif
1370     fThreadSafeCache->dropUniqueRefs(nullptr);
1371     // Sort the queue
1372     fPurgeableQueue.sort();
1373 
1374     // Make a list of the scratch resources to delete
1375     SkTDArray<GrGpuResource*> scratchResources;
1376     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1377         GrGpuResource* resource = fPurgeableQueue.at(i);
1378         if (!resource || !IsValidAddress(resource)) {
1379             continue;
1380         }
1381         SkASSERT(resource->resourcePriv().isPurgeable());
1382         if (!resource->getUniqueKey().isValid()) {
1383             *scratchResources.append() = resource;
1384         }
1385     }
1386 
1387     // Delete the scratch resources. This must be done as a separate pass
1388     // to avoid messing up the sorted order of the queue
1389     for (int i = 0; i < scratchResources.count(); i++) {
1390         scratchResources.getAt(i)->cacheAccess().release();
1391     }
1392 
1393     this->validate();
1394 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1395     traceAfterPurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1396 #endif
1397 }
1398 
1399 // OH ISSUE: suppress release window
1400 void GrResourceCache::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived) {
1401     if (!fEnabled) {
1402         return;
1403     }
1404     this->purgeAsNeeded(nextFrameHasArrived);
1405 }
1406 
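// Trims the cache at most one resource per call. If more than one exited pid is pending, a
// purgeable resource owned by an exited pid is released first. Otherwise, once budgeted bytes
// reach the soft limit (90% of fMaxBytes by default, overridable on OHOS builds via
// persist.sys.graphic.mem.soft_limit), the oldest purgeable resource whose pid is not protected
// (and, if scratchResourcesOnly is set, that has no unique key) is released.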
1407 void GrResourceCache::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
1408         const std::set<int>& protectedPidSet) {
1409     HITRACE_OHOS_NAME_FMT_ALWAYS("PurgeGrResourceCache cur=%zu, limit=%zu", fBudgetedBytes, fMaxBytes);
1410     if (exitedPidSet.size() > 1) {
1411         for (int i = 1; i < fPurgeableQueue.count(); i++) {
1412             GrGpuResource* resource = fPurgeableQueue.at(i);
1413             SkASSERT(resource->resourcePriv().isPurgeable());
1414             if (IsValidAddress(resource) && exitedPidSet.find(resource->getResourceTag().fPid) != exitedPidSet.end()) {
1415                 resource->cacheAccess().release();
1416                 this->validate();
1417                 return;
1418             }
1419         }
1420     }
1421     fPurgeableQueue.sort();
1422     const char* softLimitPercentage = "0.9";
1423     #ifdef NOT_BUILD_FOR_OHOS_SDK
1424     static int softLimit =
1425             std::atof(OHOS::system::GetParameter("persist.sys.graphic.mem.soft_limit",
1426             softLimitPercentage).c_str()) * fMaxBytes;
1427     #else
1428     static int softLimit = 0.9 * fMaxBytes;
1429     #endif
1430     if (fBudgetedBytes >= softLimit) {
1431         for (int i = 0; i < fPurgeableQueue.count(); i++) {
1432             GrGpuResource* resource = fPurgeableQueue.at(i);
1433             SkASSERT(resource->resourcePriv().isPurgeable());
1434             if (IsValidAddress(resource) &&
1435                 protectedPidSet.find(resource->getResourceTag().fPid) == protectedPidSet.end() &&
1436                 (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1437                 resource->cacheAccess().release();
1438                 this->validate();
1439                 return;
1440             }
1441         }
1442     }
1443 }
1444 
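// Releases all purgeable resources owned by the exited pids, plus any purgeable scratch
// resources (no valid unique key), and then forgets those pids. Collection and release are done
// as separate passes so the sorted purgeable queue is not disturbed mid-iteration.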
1445 void GrResourceCache::purgeUnlockedResourcesByPid(bool scratchResourceOnly, const std::set<int>& exitedPidSet) {
1446 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1447     SimpleCacheInfo simpleCacheInfo;
1448     traceBeforePurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1449 #endif
1450     // Sort the queue
1451     fPurgeableQueue.sort();
1452 
1453     // Make lists of the resources that need to be purged
1454     fThreadSafeCache->dropUniqueRefs(nullptr);
1455     SkTDArray<GrGpuResource*> exitPidResources;
1456     SkTDArray<GrGpuResource*> scratchResources;
1457     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1458         GrGpuResource* resource = fPurgeableQueue.at(i);
1459         if (!resource || !IsValidAddress(resource)) {
1460             continue;
1461         }
1462         SkASSERT(resource->resourcePriv().isPurgeable());
1463         if (exitedPidSet.count(resource->getResourceTag().fPid)) {
1464             *exitPidResources.append() = resource;
1465         } else if (!resource->getUniqueKey().isValid()) {
1466             *scratchResources.append() = resource;
1467         }
1468     }
1469 
1470     // Delete the exited-pid and scratch resources. This must be done as a separate pass
1471     // to avoid messing up the sorted order of the queue
1472     for (int i = 0; i < exitPidResources.count(); i++) {
1473         exitPidResources.getAt(i)->cacheAccess().release();
1474     }
1475     for (int i = 0; i < scratchResources.count(); i++) {
1476         scratchResources.getAt(i)->cacheAccess().release();
1477     }
1478 
1479     for (auto pid : exitedPidSet) {
1480         fExitedPid_.erase(pid);
1481     }
1482 
1483     this->validate();
1484 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1485     traceAfterPurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1486 #endif
1487 }
1488 
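// Releases purgeable resources whose tag matches 'tag'; when scratchResourcesOnly is set, only
// those without a valid unique key are released.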
1489 void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
1490     // Sort the queue
1491     fPurgeableQueue.sort();
1492 
1493     // Make a list of the scratch resources to delete
1494     SkTDArray<GrGpuResource*> scratchResources;
1495     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1496         GrGpuResource* resource = fPurgeableQueue.at(i);
1497         SkASSERT(resource->resourcePriv().isPurgeable());
1498         if (IsValidAddress(resource) && tag.filter(resource->getResourceTag()) &&
1499             (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1500             *scratchResources.append() = resource;
1501         }
1502     }
1503 
1504     // Delete the scratch resources. This must be done as a separate pass
1505     // to avoid messing up the sorted order of the queue
1506     for (int i = 0; i < scratchResources.count(); i++) {
1507         scratchResources.getAt(i)->cacheAccess().release();
1508     }
1509 
1510     this->validate();
1511 }
1512 
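// Tries to free enough budgeted, purgeable resources so that 'desiredHeadroomBytes' would fit
// under fMaxBytes. Returns false if the headroom cannot be achieved; on success the selected
// resources are copied out of the queue before release so the queue is not mutated while it is
// being walked.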
1513 bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
1514     AutoValidate av(this);
1515     if (desiredHeadroomBytes > fMaxBytes) {
1516         return false;
1517     }
1518     if (this->wouldFit(desiredHeadroomBytes)) {
1519         return true;
1520     }
1521     fPurgeableQueue.sort();
1522 
1523     size_t projectedBudget = fBudgetedBytes;
1524     int purgeCnt = 0;
1525     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1526         GrGpuResource* resource = fPurgeableQueue.at(i);
1527         if (IsValidAddress(resource) && GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1528             projectedBudget -= resource->gpuMemorySize();
1529         }
1530         if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
1531             purgeCnt = i + 1;
1532             break;
1533         }
1534     }
1535     if (purgeCnt == 0) {
1536         return false;
1537     }
1538 
1539     // Success! Release the resources.
1540     // Copy to array first so we don't mess with the queue.
1541     std::vector<GrGpuResource*> resources;
1542     resources.reserve(purgeCnt);
1543     for (int i = 0; i < purgeCnt; i++) {
1544         resources.push_back(fPurgeableQueue.at(i));
1545     }
1546     for (GrGpuResource* resource : resources) {
1547         resource->cacheAccess().release();
1548     }
1549     return true;
1550 }
1551 
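// Purges roughly 'bytesToPurge' from the cache. When preferScratchResources is set and enough
// purgeable bytes exist, scratch resources are released first; any remainder is purged in LRU
// order by temporarily lowering fMaxBytes and calling purgeAsNeeded().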
1552 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
1553 
1554     const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
1555     bool stillOverbudget = tmpByteBudget < fBytes;
1556 
1557     if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
1558         // Sort the queue
1559         fPurgeableQueue.sort();
1560 
1561         // Make a list of the scratch resources to delete
1562         SkTDArray<GrGpuResource*> scratchResources;
1563         size_t scratchByteCount = 0;
1564         for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
1565             GrGpuResource* resource = fPurgeableQueue.at(i);
1566             SkASSERT(resource->resourcePriv().isPurgeable());
1567             if (IsValidAddress(resource) && !resource->getUniqueKey().isValid()) {
1568                 *scratchResources.append() = resource;
1569                 scratchByteCount += resource->gpuMemorySize();
1570                 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
1571             }
1572         }
1573 
1574         // Delete the scratch resources. This must be done as a separate pass
1575         // to avoid messing up the sorted order of the queue
1576         for (int i = 0; i < scratchResources.count(); i++) {
1577             scratchResources.getAt(i)->cacheAccess().release();
1578         }
1579         stillOverbudget = tmpByteBudget < fBytes;
1580 
1581         this->validate();
1582     }
1583 
1584     // Purge any remaining resources in LRU order
1585     if (stillOverbudget) {
1586         const size_t cachedByteCount = fMaxBytes;
1587         fMaxBytes = tmpByteBudget;
1588         this->purgeAsNeeded();
1589         fMaxBytes = cachedByteCount;
1590     }
1591 }
1592 
1593 bool GrResourceCache::requestsFlush() const {
1594     return this->overBudget() && !fPurgeableQueue.count() &&
1595            fNumBudgetedResourcesFlushWillMakePurgeable > 0;
1596 }
1597 
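// Holds an extra ref on 'texture' until a matching GrTextureFreedMessage is processed by
// processFreedGpuResources(); repeated calls for the same texture just bump the pending unref
// count for that texture.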
1598 void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
1599     texture->ref();
1600     uint32_t id = texture->uniqueID().asUInt();
1601     if (auto* data = fTexturesAwaitingUnref.find(id)) {
1602         data->addRef();
1603     } else {
1604         fTexturesAwaitingUnref.set(id, {texture});
1605     }
1606 }
1607 
1608 void GrResourceCache::processFreedGpuResources() {
1609     if (!fTexturesAwaitingUnref.count()) {
1610         return;
1611     }
1612 
1613     SkTArray<GrTextureFreedMessage> msgs;
1614     fFreedTextureInbox.poll(&msgs);
1615     for (int i = 0; i < msgs.count(); ++i) {
1616         SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
1617         uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
1618         TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
1619         // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
1620         // empty and we would have returned early above. Thus, any texture from a message should be
1621         // in the list of fTexturesAwaitingUnref.
1622         SkASSERT(info);
1623         info->unref();
1624         if (info->finished()) {
1625             fTexturesAwaitingUnref.remove(id);
1626         }
1627     }
1628 }
1629 
1630 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
1631     int index = fNonpurgeableResources.count();
1632     *fNonpurgeableResources.append() = resource;
1633     *resource->cacheAccess().accessCacheIndex() = index;
1634 }
1635 
1636 void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
1637     int* index = resource->cacheAccess().accessCacheIndex();
1638     // Fill the hole we will create in the array with the tail object, adjust its index, and
1639     // then pop the array
1640     GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
1641     SkASSERT(fNonpurgeableResources[*index] == resource);
1642     fNonpurgeableResources[*index] = tail;
1643     *tail->cacheAccess().accessCacheIndex() = *index;
1644     fNonpurgeableResources.pop();
1645     SkDEBUGCODE(*index = -1);
1646 }
1647 
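// Returns a monotonically increasing timestamp used to order resources for LRU purging. When
// the 32-bit counter wraps back to 0, every resource's timestamp is recompacted: purgeable and
// non-purgeable resources are merged in old-timestamp order and reassigned sequential values
// starting at 0, then the purgeable queue is rebuilt.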
1648 uint32_t GrResourceCache::getNextTimestamp() {
1649     // If we wrap then all the existing resources will appear older than any resources that get
1650     // a timestamp after the wrap.
1651     if (0 == fTimestamp) {
1652         int count = this->getResourceCount();
1653         if (count) {
1654             // Reset all the timestamps. We sort the resources by timestamp and then assign
1655             // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
1656             // rare.
1657             SkTDArray<GrGpuResource*> sortedPurgeableResources;
1658             sortedPurgeableResources.setReserve(fPurgeableQueue.count());
1659 
1660             while (fPurgeableQueue.count()) {
1661                 *sortedPurgeableResources.append() = fPurgeableQueue.peek();
1662                 fPurgeableQueue.pop();
1663             }
1664 
1665             SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
1666                      CompareTimestamp);
1667 
1668             // Pick resources out of the purgeable and non-purgeable arrays based on lowest
1669             // timestamp and assign new timestamps.
1670             int currP = 0;
1671             int currNP = 0;
1672             while (currP < sortedPurgeableResources.count() &&
1673                    currNP < fNonpurgeableResources.count()) {
1674                 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
1675                 uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
1676                 SkASSERT(tsP != tsNP);
1677                 if (tsP < tsNP) {
1678                     sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
1679                 } else {
1680                     // Correct the index in the nonpurgeable array stored on the resource post-sort.
1681                     *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
1682                     fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
1683                 }
1684             }
1685 
1686             // The above loop ended when we hit the end of one array. Finish the other one.
1687             while (currP < sortedPurgeableResources.count()) {
1688                 sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
1689             }
1690             while (currNP < fNonpurgeableResources.count()) {
1691                 *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
1692                 fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
1693             }
1694 
1695             // Rebuild the queue.
1696             for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
1697                 fPurgeableQueue.insert(sortedPurgeableResources[i]);
1698             }
1699 
1700             this->validate();
1701             SkASSERT(count == this->getResourceCount());
1702 
1703             // count should be the next timestamp we return.
1704             SkASSERT(fTimestamp == SkToU32(count));
1705         }
1706     }
1707     return fTimestamp++;
1708 }
1709 
1710 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
1711 void GrResourceCache::dumpAllResource(std::stringstream &dump) const {
1712     if (getResourceCount() == 0) {
1713         return;
1714     }
1715     dump << "Purgeable: " << fPurgeableQueue.count() << std::endl;
1716     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1717         GrGpuResource* resource = fPurgeableQueue.at(i);
1718         if (strcmp(resource->getResourceType(), "VkImage") != 0) continue;
1719         dump << i << " " << resource->getResourceType() << " ";
1720         resource->dumpVkImageInfo(dump);
1721     }
1722     dump << "Non-Purgeable: " << fNonpurgeableResources.count() << std::endl;
1723     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1724         GrGpuResource* resource = fNonpurgeableResources[i];
1725         if (strcmp(resource->getResourceType(), "VkImage") != 0) continue;
1726         dump << i << " " << resource->getResourceType() << " ";
1727         resource->dumpVkImageInfo(dump);
1728     }
1729 #ifdef SK_VULKAN
1730     dump << "Destroy Record: " << std::endl;
1731     ParallelDebug::DumpAllDestroyVkImage(dump);
1732 #endif
1733 }
1734 #endif
1735 
1736 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
1737     SkTDArray<GrGpuResource*> resources;
1738     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1739         *resources.append() = fNonpurgeableResources[i];
1740     }
1741     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1742         *resources.append() = fPurgeableQueue.at(i);
1743     }
1744     for (int i = 0; i < resources.count(); i++) {
1745         auto resource = resources.getAt(i);
1746         if (!resource || !IsValidAddress(resource) || resource->wasDestroyed()) {
1747             continue;
1748         }
1749         resource->dumpMemoryStatistics(traceMemoryDump);
1750     }
1751 }
1752 
1753 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
1754     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1755         if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
1756             fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
1757         }
1758     }
1759     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1760         if (IsValidAddress(fPurgeableQueue.at(i)) && tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
1761             fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
1762         }
1763     }
1764 }
1765 
1766 #if GR_CACHE_STATS
1767 void GrResourceCache::getStats(Stats* stats) const {
1768     stats->reset();
1769 
1770     stats->fTotal = this->getResourceCount();
1771     stats->fNumNonPurgeable = fNonpurgeableResources.count();
1772     stats->fNumPurgeable = fPurgeableQueue.count();
1773 
1774     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1775         stats->update(fNonpurgeableResources[i]);
1776     }
1777     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1778         stats->update(fPurgeableQueue.at(i));
1779     }
1780 }
1781 
1782 #if GR_TEST_UTILS
1783 void GrResourceCache::dumpStats(SkString* out) const {
1784     this->validate();
1785 
1786     Stats stats;
1787 
1788     this->getStats(&stats);
1789 
1790     float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
1791 
1792     out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
1793     out->appendf("\t\tEntry Count: current %d"
1794                  " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
1795                  stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
1796                  stats.fScratch, fHighWaterCount);
1797     out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
1798                  SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
1799                  SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
1800 }
1801 
1802 void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
1803                                              SkTArray<double>* values) const {
1804     this->validate();
1805 
1806     Stats stats;
1807     this->getStats(&stats);
1808 
1809     keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
1810 }
1811 #endif // GR_TEST_UTILS
1812 #endif // GR_CACHE_STATS
1813 
1814 #ifdef SK_DEBUG
1815 void GrResourceCache::validate() const {
1816     // Reduce the frequency of validations for large resource counts.
1817     static SkRandom gRandom;
1818     int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
1819     if (~mask && (gRandom.nextU() & mask)) {
1820         return;
1821     }
1822 
1823     struct Stats {
1824         size_t fBytes;
1825         int fBudgetedCount;
1826         size_t fBudgetedBytes;
1827         int fLocked;
1828         int fScratch;
1829         int fCouldBeScratch;
1830         int fContent;
1831         const ScratchMap* fScratchMap;
1832         const UniqueHash* fUniqueHash;
1833 
1834         Stats(const GrResourceCache* cache) {
1835             memset(this, 0, sizeof(*this));
1836             fScratchMap = &cache->fScratchMap;
1837             fUniqueHash = &cache->fUniqueHash;
1838         }
1839 
1840         void update(GrGpuResource* resource) {
1841             fBytes += resource->gpuMemorySize();
1842 
1843             if (!resource->resourcePriv().isPurgeable()) {
1844                 ++fLocked;
1845             }
1846 
1847             const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
1848             const GrUniqueKey& uniqueKey = resource->getUniqueKey();
1849 
1850             if (resource->cacheAccess().isUsableAsScratch()) {
1851                 SkASSERT(!uniqueKey.isValid());
1852                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
1853                 SkASSERT(!resource->cacheAccess().hasRef());
1854                 ++fScratch;
1855                 SkASSERT(fScratchMap->countForKey(scratchKey));
1856                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1857             } else if (scratchKey.isValid()) {
1858                 SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
1859                          uniqueKey.isValid() || resource->cacheAccess().hasRef());
1860                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1861                 SkASSERT(!fScratchMap->has(resource, scratchKey));
1862             }
1863             if (uniqueKey.isValid()) {
1864                 ++fContent;
1865                 SkASSERT(fUniqueHash->find(uniqueKey) == resource);
1866                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
1867                          resource->resourcePriv().refsWrappedObjects());
1868             }
1869 
1870             if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1871                 ++fBudgetedCount;
1872                 fBudgetedBytes += resource->gpuMemorySize();
1873             }
1874         }
1875     };
1876 
1877     {
1878         int count = 0;
1879         fScratchMap.foreach([&](const GrGpuResource& resource) {
1880             SkASSERT(resource.cacheAccess().isUsableAsScratch());
1881             count++;
1882         });
1883         SkASSERT(count == fScratchMap.count());
1884     }
1885 
1886     Stats stats(this);
1887     size_t purgeableBytes = 0;
1888     int numBudgetedResourcesFlushWillMakePurgeable = 0;
1889 
1890     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1891         SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
1892                  fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
1893         SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
1894         SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
1895         if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
1896             !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
1897             fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
1898             ++numBudgetedResourcesFlushWillMakePurgeable;
1899         }
1900         stats.update(fNonpurgeableResources[i]);
1901     }
1902     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1903         SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
1904         SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
1905         SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
1906         stats.update(fPurgeableQueue.at(i));
1907         purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
1908     }
1909 
1910     SkASSERT(fCount == this->getResourceCount());
1911     SkASSERT(fBudgetedCount <= fCount);
1912     SkASSERT(fBudgetedBytes <= fBytes);
1913     SkASSERT(stats.fBytes == fBytes);
1914     SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
1915              numBudgetedResourcesFlushWillMakePurgeable);
1916     SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
1917     SkASSERT(stats.fBudgetedCount == fBudgetedCount);
1918     SkASSERT(purgeableBytes == fPurgeableBytes);
1919 #if GR_CACHE_STATS
1920     SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
1921     SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
1922     SkASSERT(fBytes <= fHighWaterBytes);
1923     SkASSERT(fCount <= fHighWaterCount);
1924     SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
1925     SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
1926 #endif
1927     SkASSERT(stats.fContent == fUniqueHash.count());
1928     SkASSERT(stats.fScratch == fScratchMap.count());
1929 
1930     // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
1931     // calls. This will be fixed when subresource registration is explicit.
1932     // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
1933     // SkASSERT(!overBudget || locked == count || fPurging);
1934 }
1935 
1936 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
1937     int index = *resource->cacheAccess().accessCacheIndex();
1938     if (index < 0) {
1939         return false;
1940     }
1941     if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1942         return true;
1943     }
1944     if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1945         return true;
1946     }
1947     SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
1948     return false;
1949 }
1950 
1951 #endif // SK_DEBUG
1952 
1953 #if GR_TEST_UTILS
1954 
1955 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
1956     int count = 0;
1957     fUniqueHash.foreach([&](const GrGpuResource& resource){
1958         if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
1959             ++count;
1960         }
1961     });
1962     return count;
1963 }
1964 
1965 void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
1966     fTimestamp = newTimestamp;
1967 }
1968 
1969 #endif // GR_TEST_UTILS
1970