/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceCache.h"
#include <atomic>
#include <ctime>
#include <vector>
#include <map>
#include <sstream>
#ifdef NOT_BUILD_FOR_OHOS_SDK
#include <parameters.h>
#endif
#include "include/core/SkString.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkTo.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMessageBus.h"
#include "src/core/SkOpts.h"
#include "src/core/SkScopeExit.h"
#include "src/core/SkTSort.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxyCacheAccess.h"
#include "src/gpu/GrThreadSafeCache.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"

DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);

DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);

#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)

//////////////////////////////////////////////////////////////////////////////

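// GrScratchKey resource types and GrUniqueKey domains are handed out from monotonically
// increasing atomic counters. Both key spaces are 16-bit, so generation aborts once a
// counter passes UINT16_MAX.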
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
    static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};

    int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
    if (type > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many Resource Types");
    }

    return static_cast<ResourceType>(type);
}

GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
    static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};

    int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
    if (domain > SkTo<int32_t>(UINT16_MAX)) {
        SK_ABORT("Too many GrUniqueKey Domains");
    }

    return static_cast<Domain>(domain);
}

uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}

//////////////////////////////////////////////////////////////////////////////

class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};

//////////////////////////////////////////////////////////////////////////////

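// TextureAwaitingUnref tracks a texture that still has unref messages owed to it from other
// contexts. It stores the texture pointer plus the number of pending unrefs, and releases any
// remaining refs in its destructor.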
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}

inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}

inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}

inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}

inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }

inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}

inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }

//////////////////////////////////////////////////////////////////////////////

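// The async-free overtime duration and max-bytes rate are read once from OHOS system
// parameters when built for OHOS (defaults "600" and "0.9"); otherwise the same defaults are
// compiled in directly.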
GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
#ifdef NOT_BUILD_FOR_OHOS_SDK
    static int overtimeDuration =
        std::atoi(OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_overtime", "600").c_str());
    static double maxBytesRate =
        std::atof(OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_max_rate", "0.9").c_str());
#else
    static int overtimeDuration = 600;
    static double maxBytesRate = 0.9;
#endif
    fMaxBytesRate = maxBytesRate;
    fOvertimeDuration = overtimeDuration;
}

GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}

void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}

#ifdef SKIA_DFX_FOR_OHOS
static constexpr int MB = 1024 * 1024;

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
bool GrResourceCache::purgeUnlocakedResTraceEnabled_ =
    std::atoi((OHOS::system::GetParameter("sys.graphic.skia.cache.debug", "0").c_str())) == 1;
#endif

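// dumpInfo() splits the semicolon-separated summary produced by cacheInfo() into one indented
// line per entry; it bails out early if the caller passes a null SkString.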
void GrResourceCache::dumpInfo(SkString* out) {
    if (out == nullptr) {
        SkDebugf("OHOS GrResourceCache::dumpInfo outPtr is nullptr!");
        return;
    }
    auto info = cacheInfo();
    constexpr uint8_t STEP_INDEX = 1;
    SkTArray<SkString> lines;
    SkStrSplit(info.substr(STEP_INDEX, info.length() - STEP_INDEX).c_str(), ";", &lines);
    for (int i = 0; i < lines.size(); ++i) {
        out->appendf("    %s\n", lines[i].c_str());
    }
}

std::string GrResourceCache::cacheInfo()
{
    auto fPurgeableQueueInfoStr = cacheInfoPurgeableQueue();
    auto fNonpurgeableResourcesInfoStr = cacheInfoNoPurgeableQueue();
    size_t fRealAllocBytes = cacheInfoRealAllocSize();
    auto fRealAllocInfoStr = cacheInfoRealAllocQueue();
    auto fRealBytesOfPidInfoStr = realBytesOfPid();

    std::ostringstream cacheInfoStream;
    cacheInfoStream << "[fPurgeableQueueInfoStr.count : " << fPurgeableQueue.count()
        << "; fNonpurgeableResources.count : " << fNonpurgeableResources.count()
        << "; fBudgetedBytes : " << fBudgetedBytes
        << "(" << static_cast<size_t>(fBudgetedBytes / MB)
        << " MB) / " << fMaxBytes
        << "(" << static_cast<size_t>(fMaxBytes / MB)
        << " MB); fBudgetedCount : " << fBudgetedCount
        << "; fBytes : " << fBytes
        << "(" << static_cast<size_t>(fBytes / MB)
        << " MB); fPurgeableBytes : " << fPurgeableBytes
        << "(" << static_cast<size_t>(fPurgeableBytes / MB)
        << " MB); fAllocImageBytes : " << fAllocImageBytes
        << "(" << static_cast<size_t>(fAllocImageBytes / MB)
        << " MB); fAllocBufferBytes : " << fAllocBufferBytes
        << "(" << static_cast<size_t>(fAllocBufferBytes / MB)
        << " MB); fRealAllocBytes : " << fRealAllocBytes
        << "(" << static_cast<size_t>(fRealAllocBytes / MB)
        << " MB); fTimestamp : " << fTimestamp
        << "; " << fPurgeableQueueInfoStr << "; " << fNonpurgeableResourcesInfoStr
        << "; " << fRealAllocInfoStr << "; " << fRealBytesOfPidInfoStr;
    return cacheInfoStream.str();
}

#ifdef SKIA_OHOS_FOR_OHOS_TRACE
void GrResourceCache::traceBeforePurgeUnlockRes(const std::string& method, SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        StartTrace(HITRACE_TAG_GRAPHIC_AGP, method + " begin cacheInfo = " + cacheInfo());
    } else {
        simpleCacheInfo.fPurgeableQueueCount = fPurgeableQueue.count();
        simpleCacheInfo.fNonpurgeableResourcesCount = fNonpurgeableResources.count();
        simpleCacheInfo.fPurgeableBytes = fPurgeableBytes;
        simpleCacheInfo.fBudgetedCount = fBudgetedCount;
        simpleCacheInfo.fBudgetedBytes = fBudgetedBytes;
        simpleCacheInfo.fAllocImageBytes = fAllocImageBytes;
        simpleCacheInfo.fAllocBufferBytes = fAllocBufferBytes;
    }
}

void GrResourceCache::traceAfterPurgeUnlockRes(const std::string& method, const SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s", method.c_str(), cacheInfo().c_str());
        FinishTrace(HITRACE_TAG_GRAPHIC_AGP);
    } else {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s",
            method.c_str(), cacheInfoComparison(simpleCacheInfo).c_str());
    }
}

std::string GrResourceCache::cacheInfoComparison(const SimpleCacheInfo& simpleCacheInfo)
{
    std::ostringstream cacheInfoComparison;
    cacheInfoComparison << "PurgeableCount : " << simpleCacheInfo.fPurgeableQueueCount
        << " / " << fPurgeableQueue.count()
        << "; NonpurgeableCount : " << simpleCacheInfo.fNonpurgeableResourcesCount
        << " / " << fNonpurgeableResources.count()
        << "; PurgeableBytes : " << simpleCacheInfo.fPurgeableBytes << " / " << fPurgeableBytes
        << "; BudgetedCount : " << simpleCacheInfo.fBudgetedCount << " / " << fBudgetedCount
        << "; BudgetedBytes : " << simpleCacheInfo.fBudgetedBytes << " / " << fBudgetedBytes
        << "; AllocImageBytes : " << simpleCacheInfo.fAllocImageBytes << " / " << fAllocImageBytes
        << "; AllocBufferBytes : " << simpleCacheInfo.fAllocBufferBytes << " / " << fAllocBufferBytes;
    return cacheInfoComparison.str();
}
#endif // SKIA_OHOS_FOR_OHOS_TRACE

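// The cacheInfo*Queue helpers walk the purgeable queue and the non-purgeable array and bucket
// resources by window id (fWid), process id (fPid), or type id (fFid) from their resource tag,
// accumulating per-bucket byte sizes and counts; untagged resources are reported as "Unknown".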
std::string GrResourceCache::cacheInfoPurgeableQueue()
{
    std::map<uint32_t, int> purgSizeInfoWid;
    std::map<uint32_t, int> purgCountInfoWid;
    std::map<uint32_t, std::string> purgNameInfoWid;
    std::map<uint32_t, int> purgPidInfoWid;

    std::map<uint32_t, int> purgSizeInfoPid;
    std::map<uint32_t, int> purgCountInfoPid;
    std::map<uint32_t, std::string> purgNameInfoPid;

    std::map<uint32_t, int> purgSizeInfoFid;
    std::map<uint32_t, int> purgCountInfoFid;
    std::map<uint32_t, std::string> purgNameInfoFid;

    int purgCountUnknown = 0;
    int purgSizeUnknown = 0;

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
        } else {
            purgCountUnknown++;
            purgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (purgSizeInfoWid.size() > 0) {
        infoStr += ";PurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
    }
    if (purgSizeInfoPid.size() > 0) {
        infoStr += ";PurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
    }
    if (purgSizeInfoFid.size() > 0) {
        infoStr += ";PurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";PurgeableInfo_Unknown:", purgCountUnknown, purgSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::cacheInfoNoPurgeableQueue()
{
    std::map<uint32_t, int> noPurgSizeInfoWid;
    std::map<uint32_t, int> noPurgCountInfoWid;
    std::map<uint32_t, std::string> noPurgNameInfoWid;
    std::map<uint32_t, int> noPurgPidInfoWid;

    std::map<uint32_t, int> noPurgSizeInfoPid;
    std::map<uint32_t, int> noPurgCountInfoPid;
    std::map<uint32_t, std::string> noPurgNameInfoPid;

    std::map<uint32_t, int> noPurgSizeInfoFid;
    std::map<uint32_t, int> noPurgCountInfoFid;
    std::map<uint32_t, std::string> noPurgNameInfoFid;

    int noPurgCountUnknown = 0;
    int noPurgSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updatePurgeablePidMap(resource, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
        } else {
            noPurgCountUnknown++;
            noPurgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (noPurgSizeInfoWid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
    }
    if (noPurgSizeInfoPid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
    }
    if (noPurgSizeInfoFid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";NonPurgeableInfo_Unknown:", noPurgCountUnknown, noPurgSizeUnknown);
    return infoStr;
}

size_t GrResourceCache::cacheInfoRealAllocSize()
{
    size_t realAllocImageSize = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        realAllocImageSize += resource->getRealAllocSize();
    }
    return realAllocImageSize;
}

std::string GrResourceCache::cacheInfoRealAllocQueue()
{
    std::map<uint32_t, std::string> realAllocNameInfoWid;
    std::map<uint32_t, int> realAllocSizeInfoWid;
    std::map<uint32_t, int> realAllocPidInfoWid;
    std::map<uint32_t, int> realAllocCountInfoWid;

    std::map<uint32_t, std::string> realAllocNameInfoPid;
    std::map<uint32_t, int> realAllocSizeInfoPid;
    std::map<uint32_t, int> realAllocCountInfoPid;

    std::map<uint32_t, std::string> realAllocNameInfoFid;
    std::map<uint32_t, int> realAllocSizeInfoFid;
    std::map<uint32_t, int> realAllocCountInfoFid;

    int realAllocCountUnknown = 0;
    int realAllocSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(
                resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
        if (resource == nullptr || !resource->isRealAlloc()) {
            continue;
        }
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updateRealAllocWidMap(
                resource, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
        } else if (resourceTag.fPid != 0) {
            updateRealAllocPidMap(resource, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
        } else if (resourceTag.fFid != 0) {
            updateRealAllocFidMap(resource, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
        } else {
            realAllocCountUnknown++;
            realAllocSizeUnknown += resource->getRealAllocSize();
        }
    }

    std::string infoStr;
    if (realAllocSizeInfoWid.size() > 0) {
        infoStr += ";RealAllocInfo_Node:[";
        updatePurgeableWidInfo(
            infoStr, realAllocNameInfoWid, realAllocSizeInfoWid, realAllocPidInfoWid, realAllocCountInfoWid);
    }
    if (realAllocSizeInfoPid.size() > 0) {
        infoStr += ";RealAllocInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, realAllocNameInfoPid, realAllocSizeInfoPid, realAllocCountInfoPid);
    }
    if (realAllocSizeInfoFid.size() > 0) {
        infoStr += ";RealAllocInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, realAllocNameInfoFid, realAllocSizeInfoFid, realAllocCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";RealAllocInfo_Unknown:", realAllocCountUnknown, realAllocSizeUnknown);
    return infoStr;
}

std::string GrResourceCache::realBytesOfPid()
{
    std::string infoStr;
    infoStr += ";fBytesOfPid : [";
    if (fBytesOfPid.size() > 0) {
        for (auto it = fBytesOfPid.begin(); it != fBytesOfPid.end(); it++) {
            infoStr += std::to_string(it->first) + ":" + std::to_string(it->second) + ", ";
        }
    }
    infoStr += "]";
    return infoStr;
}

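// The update*Map helpers below fold a single resource into the per-key aggregation maps: on
// first sight of a key they record its name (and pid for the Wid maps) and start the size and
// count totals; otherwise they just accumulate.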
void GrResourceCache::updatePurgeableWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, int>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + resource->gpuMemorySize();
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = resource->gpuMemorySize();
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updatePurgeablePidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, int>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + resource->gpuMemorySize();
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = resource->gpuMemorySize();
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updatePurgeableFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, int>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + resource->gpuMemorySize();
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = resource->gpuMemorySize();
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updateRealAllocWidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoWid,
                                            std::map<uint32_t, int>& sizeInfoWid,
                                            std::map<uint32_t, int>& pidInfoWid,
                                            std::map<uint32_t, int>& countInfoWid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoWid.find(resourceTag.fWid);
    if (it != sizeInfoWid.end()) {
        sizeInfoWid[resourceTag.fWid] = it->second + size;
        countInfoWid[resourceTag.fWid]++;
    } else {
        sizeInfoWid[resourceTag.fWid] = size;
        nameInfoWid[resourceTag.fWid] = resourceTag.fName;
        pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
        countInfoWid[resourceTag.fWid] = 1;
    }
}

void GrResourceCache::updateRealAllocPidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoPid,
                                            std::map<uint32_t, int>& sizeInfoPid,
                                            std::map<uint32_t, int>& countInfoPid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoPid.find(resourceTag.fPid);
    if (it != sizeInfoPid.end()) {
        sizeInfoPid[resourceTag.fPid] = it->second + size;
        countInfoPid[resourceTag.fPid]++;
    } else {
        sizeInfoPid[resourceTag.fPid] = size;
        nameInfoPid[resourceTag.fPid] = resourceTag.fName;
        countInfoPid[resourceTag.fPid] = 1;
    }
}

void GrResourceCache::updateRealAllocFidMap(GrGpuResource* resource,
                                            std::map<uint32_t, std::string>& nameInfoFid,
                                            std::map<uint32_t, int>& sizeInfoFid,
                                            std::map<uint32_t, int>& countInfoFid)
{
    size_t size = resource->getRealAllocSize();
    auto resourceTag = resource->getResourceTag();
    auto it = sizeInfoFid.find(resourceTag.fFid);
    if (it != sizeInfoFid.end()) {
        sizeInfoFid[resourceTag.fFid] = it->second + size;
        countInfoFid[resourceTag.fFid]++;
    } else {
        sizeInfoFid[resourceTag.fFid] = size;
        nameInfoFid[resourceTag.fFid] = resourceTag.fName;
        countInfoFid[resourceTag.fFid] = 1;
    }
}

void GrResourceCache::updatePurgeableWidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoWid,
                                             std::map<uint32_t, int>& sizeInfoWid,
                                             std::map<uint32_t, int>& pidInfoWid,
                                             std::map<uint32_t, int>& countInfoWid)
{
    for (auto it = sizeInfoWid.begin(); it != sizeInfoWid.end(); it++) {
        infoStr += "[" + nameInfoWid[it->first] +
            ",pid=" + std::to_string(pidInfoWid[it->first]) +
            ",NodeId=" + std::to_string(it->first & 0xFFFFFFFF) +
            ",count=" + std::to_string(countInfoWid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeablePidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoPid,
                                             std::map<uint32_t, int>& sizeInfoPid,
                                             std::map<uint32_t, int>& countInfoPid)
{
    for (auto it = sizeInfoPid.begin(); it != sizeInfoPid.end(); it++) {
        infoStr += "[" + nameInfoPid[it->first] +
            ",pid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoPid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableFidInfo(std::string& infoStr,
                                             std::map<uint32_t, std::string>& nameInfoFid,
                                             std::map<uint32_t, int>& sizeInfoFid,
                                             std::map<uint32_t, int>& countInfoFid)
{
    for (auto it = sizeInfoFid.begin(); it != sizeInfoFid.end(); it++) {
        infoStr += "[" + nameInfoFid[it->first] +
            ",typeid=" + std::to_string(it->first) +
            ",count=" + std::to_string(countInfoFid[it->first]) +
            ",size=" + std::to_string(it->second) +
            "(" + std::to_string(it->second / MB) + " MB)],";
    }
    infoStr += ']';
}

void GrResourceCache::updatePurgeableUnknownInfo(
    std::string& infoStr, const std::string& unknownPrefix, const int countUnknown, const int sizeUnknown)
{
    if (countUnknown > 0) {
        infoStr += unknownPrefix +
            "[count=" + std::to_string(countUnknown) +
            ",size=" + std::to_string(sizeUnknown) +
            "(" + std::to_string(sizeUnknown / MB) + "MB)]";
    }
}
#endif

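// insertResource() stamps the resource with the next timestamp, places it in the non-purgeable
// array, updates the total/budgeted byte counters and the per-pid accounting (triggering the
// memory-overflow callback when a pid crosses fMemoryControl_), and then purges as needed.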
void GrResourceCache::insertResource(GrGpuResource* resource)
{
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());
    if (!resource || this->isInCache(resource) || resource->wasDestroyed() || resource->resourcePriv().isPurgeable()) {
        SkDebugf("OHOS GrResourceCache::insertResource resource is invalid!!!");
        return;
    }
    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize += size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize >= fMemoryControl_ && fExitedPid_.find(pid) == fExitedPid_.end() && fMemoryOverflowCallback_) {
            fMemoryOverflowCallback_(pid, pidSize, true);
            fExitedPid_.insert(pid);
            SkDebugf("OHOS resource overflow! pid[%{public}d], size[%{public}zu]", pid, pidSize);
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
            HITRACE_METER_FMT(HITRACE_TAG_GRAPHIC_AGP, "OHOS gpu resource overflow: pid(%d), size:(%zu)",
                pid, pidSize);
#endif
        }
    }

#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
#ifdef SKIA_OHOS_FOR_OHOS_TRACE
    if (fBudgetedBytes >= fMaxBytes) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("cache over fBudgetedBytes:(%zu),fMaxBytes:(%zu)", fBudgetedBytes, fMaxBytes);
#ifdef SKIA_DFX_FOR_OHOS
        SimpleCacheInfo simpleCacheInfo;
        traceBeforePurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
        this->purgeAsNeeded();
#ifdef SKIA_DFX_FOR_OHOS
        traceAfterPurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
    } else {
        this->purgeAsNeeded();
    }
#else
    this->purgeAsNeeded();
#endif
}

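// removeResource() is the inverse of insertResource(): it drops the resource from whichever
// container currently holds it, rolls back the byte, budget, and per-pid counters, and clears
// any scratch or unique key bookkeeping.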
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable() && this->isInPurgeableCache(resource)) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else if (this->isInNonpurgeableCache(resource)) {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;

    // OH ISSUE: memory count
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize -= size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(pid);
        }
    }

    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}

void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}

void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
    AutoValidate av(this);
    this->processFreedGpuResources();
    SkASSERT(fProxyProvider); // better have called setProxyProvider
    std::vector<GrGpuResource*> recycleVector;
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (auto resource : recycleVector) {
        SkASSERT(!resource->wasDestroyed());
        resource->cacheAccess().release();
    }
}

void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (tag.isGrTagValid()) {
        grResourceTagCacheStack.push(tag);
        return;
    }
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

void GrResourceCache::popGrResourceTag()
{
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}

GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
    if (grResourceTagCacheStack.empty()) {
        return {};
    }
    return grResourceTagCacheStack.top();
}

std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
    std::set<GrGpuResourceTag> result;
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        auto tag = fNonpurgeableResources[i]->getResourceTag();
        result.insert(tag);
    }
    return result;
}

// OH ISSUE: get the memory information of the updated pid.
void GrResourceCache::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    fUpdatedBytesOfPid.swap(out);
}

// OH ISSUE: init gpu memory limit.
void GrResourceCache::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fMemoryOverflowCallback_ == nullptr) {
        fMemoryOverflowCallback_ = callback;
        fMemoryControl_ = size;
    }
}

// OH ISSUE: check whether the PID is abnormal.
bool GrResourceCache::isPidAbnormal() const
{
    return fExitedPid_.find(getCurrentGrResourceTag().fPid) != fExitedPid_.end();
}

// OH ISSUE: change the fbyte when the resource tag changes.
void GrResourceCache::changeByteOfPid(int32_t beforePid, int32_t afterPid, size_t bytes)
{
    if (beforePid) {
        auto& pidSize = fBytesOfPid[beforePid];
        pidSize -= bytes;
        fUpdatedBytesOfPid[beforePid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(beforePid);
        }
    }
    if (afterPid) {
        auto& size = fBytesOfPid[afterPid];
        size += bytes;
        fUpdatedBytesOfPid[afterPid] = size;
    }
}

void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}

class GrResourceCache::AvailableForScratchUse {
public:
    AvailableForScratchUse() { }

    bool operator()(const GrGpuResource* resource) const {
        // Everything that is in the scratch map should be usable as a
        // scratch resource.
        return true;
    }
};

GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        if (!this->isInCache(resource)) {
            SkDebugf("OHOS GrResourceCache::findAndRefScratchResource not in cache, return!!!");
            return nullptr;
        }
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}

void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}

void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}

void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}

void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        if (this->isInPurgeableCache(resource)) {
            fPurgeableBytes -= resource->gpuMemorySize();
            fPurgeableQueue.remove(resource);
        }
        if (!this->isInNonpurgeableCache(resource)) {
            this->addToNonpurgeableArray(resource);
        } else {
            SkDebugf("OHOS resource in isInNonpurgeableCache, do not add again!");
        }
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}

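// Called when a resource loses its last ref of some category. The resource may re-enter the
// scratch map, and if it has become purgeable it is moved from the non-purgeable array into the
// purgeable queue; over-budget or keyless budgeted resources are released immediately.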
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);
    if (!resource || resource->wasDestroyed() || this->isInPurgeableCache(resource) ||
        !this->isInNonpurgeableCache(resource)) {
        SkDebugf("OHOS GrResourceCache::notifyARefCntReachedZero return!");
        return;
    }
    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}

void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}

static constexpr int timeUnit = 1000;

// OH ISSUE: allow access to release interface
bool GrResourceCache::allowToPurge(const std::function<bool(void)>& nextFrameHasArrived)
{
    if (!fEnabled) {
        return true;
    }
    if (fFrameInfo.duringFrame == 0) {
        if (nextFrameHasArrived && nextFrameHasArrived()) {
            return false;
        }
        return true;
    }
    if (fFrameInfo.frameCount != fLastFrameCount) { // the next frame arrives
        struct timespec startTime = {0, 0};
        if (clock_gettime(CLOCK_REALTIME, &startTime) == -1) {
            return true;
        }
        fStartTime = startTime.tv_sec * timeUnit * timeUnit + startTime.tv_nsec / timeUnit;
        fLastFrameCount = fFrameInfo.frameCount;
        return true;
    }
    struct timespec endTime = {0, 0};
    if (clock_gettime(CLOCK_REALTIME, &endTime) == -1) {
        return true;
    }
    if (((endTime.tv_sec * timeUnit * timeUnit + endTime.tv_nsec / timeUnit) - fStartTime) >= fOvertimeDuration) {
        return false;
    }
    return true;
}

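// purgeAsNeeded() first drains invalidated-unique-key messages and freed-GPU-resource work,
// then releases purgeable resources (oldest timestamp first) while the cache is over budget and
// allowToPurge() still permits work within the current frame's time budget.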
purgeAsNeeded(const std::function<bool (void)> & nextFrameHasArrived)1219 void GrResourceCache::purgeAsNeeded(const std::function<bool(void)>& nextFrameHasArrived) {
1220     SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
1221     fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
1222     if (invalidKeyMsgs.count()) {
1223         SkASSERT(fProxyProvider);
1224 
1225         for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
1226             if (invalidKeyMsgs[i].inThreadSafeCache()) {
1227                 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
1228                 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
1229             } else {
1230                 fProxyProvider->processInvalidUniqueKey(
1231                                                     invalidKeyMsgs[i].key(), nullptr,
1232                                                     GrProxyProvider::InvalidateGPUResource::kYes);
1233                 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
1234             }
1235         }
1236     }
1237 
1238     this->processFreedGpuResources();
1239 
1240     bool stillOverbudget = this->overBudget(nextFrameHasArrived);
1241     while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1242         GrGpuResource* resource = fPurgeableQueue.peek();
1243         if (!resource->resourcePriv().isPurgeable()) {
1244             SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable\n");
1245             break;  // the queue head cannot change here, so bail out instead of spinning
1246         }
1247         SkASSERT(resource->resourcePriv().isPurgeable());
1248         resource->cacheAccess().release();
1249         stillOverbudget = this->overBudget(nextFrameHasArrived);
1250     }
1251 
1252     if (stillOverbudget) {
1253         fThreadSafeCache->dropUniqueRefs(this);
1254 
1255         stillOverbudget = this->overBudget(nextFrameHasArrived);
1256         while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1257             GrGpuResource* resource = fPurgeableQueue.peek();
1258             if (!resource->resourcePriv().isPurgeable()) {
1259                 SkDebugf("OHOS GrResourceCache::purgeAsNeeded() resource is nonPurgeable after dropUniqueRefs\n");
1260                 break;  // the queue head cannot change here, so bail out instead of spinning
1261             }
1262             SkASSERT(resource->resourcePriv().isPurgeable());
1263             resource->cacheAccess().release();
1264             stillOverbudget = this->overBudget(nextFrameHasArrived);
1265         }
1266     }
1267 
1268     this->validate();
1269 }
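
// Usage sketch (hypothetical caller, not part of this file): a frame-aware client can
// bound purging by passing a callback that reports whether the next frame is already
// due, e.g.
//
//     cache->purgeAsNeeded([&scheduler]() {        // 'scheduler' is a hypothetical object
//         return scheduler.nextFrameHasArrived();  // true => allowToPurge() stops purging
//     });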
1270 
purgeUnlockedResources(const GrStdSteadyClock::time_point * purgeTime,bool scratchResourcesOnly)1271 void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
1272                                              bool scratchResourcesOnly) {
1273 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1274     SimpleCacheInfo simpleCacheInfo;
1275     traceBeforePurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1276 #endif
1277     if (!scratchResourcesOnly) {
1278         if (purgeTime) {
1279             fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
1280         } else {
1281             fThreadSafeCache->dropUniqueRefs(nullptr);
1282         }
1283 
1284         // We could disable maintaining the heap property here, but it would add a lot of
1285         // complexity. Moreover, this is rarely called.
1286         while (fPurgeableQueue.count()) {
1287             GrGpuResource* resource = fPurgeableQueue.peek();
1288 
1289             const GrStdSteadyClock::time_point resourceTime =
1290                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
1291             if (purgeTime && resourceTime >= *purgeTime) {
1292                 // Resources record the time at which they became purgeable alongside their
1293                 // LRU timestamp, and neither value changes until the resource is made
1294                 // non-purgeable again. So at this point all of the remaining resources in
1295                 // the timestamp-sorted queue became purgeable at or after *purgeTime and
1296                 // can be skipped.
1297                 break;
1298             }
1299 
1300             SkASSERT(resource->resourcePriv().isPurgeable());
1301             resource->cacheAccess().release();
1302         }
1303     } else {
1304         // Early out if the very first item is too new to purge to avoid sorting the queue when
1305         // nothing will be deleted.
1306         if (purgeTime && fPurgeableQueue.count() &&
1307             fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
1308 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1309             traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1310 #endif
1311             return;
1312         }
1313 
1314         // Sort the queue
1315         fPurgeableQueue.sort();
1316 
1317         // Make a list of the scratch resources to delete
1318         SkTDArray<GrGpuResource*> scratchResources;
1319         for (int i = 0; i < fPurgeableQueue.count(); i++) {
1320             GrGpuResource* resource = fPurgeableQueue.at(i);
1321 
1322             const GrStdSteadyClock::time_point resourceTime =
1323                     resource->cacheAccess().timeWhenResourceBecamePurgeable();
1324             if (purgeTime && resourceTime >= *purgeTime) {
1325                 // scratch or not, all later iterations will be too recently used to purge.
1326                 break;
1327             }
1328             SkASSERT(resource->resourcePriv().isPurgeable());
1329             if (!resource->getUniqueKey().isValid()) {
1330                 *scratchResources.append() = resource;
1331             }
1332         }
1333 
1334         // Delete the scratch resources. This must be done as a separate pass
1335         // to avoid messing up the sorted order of the queue
1336         for (int i = 0; i < scratchResources.count(); i++) {
1337             scratchResources.getAt(i)->cacheAccess().release();
1338         }
1339     }
1340 
1341     this->validate();
1342 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1343     traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
1344 #endif
1345 }
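
// Usage sketch (assumes GrStdSteadyClock is a std::chrono-style steady clock, as the
// time_point arithmetic above suggests): drop every purgeable resource that has been
// idle for more than five seconds, scratch and uniquely keyed alike.
//
//     auto purgeTime = GrStdSteadyClock::now() - std::chrono::seconds(5);
//     cache->purgeUnlockedResources(&purgeTime, /*scratchResourcesOnly=*/false);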
1346 
purgeUnlockAndSafeCacheGpuResources()1347 void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
1348 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1349     SimpleCacheInfo simpleCacheInfo;
1350     traceBeforePurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1351 #endif
1352     fThreadSafeCache->dropUniqueRefs(nullptr);
1353     // Sort the queue
1354     fPurgeableQueue.sort();
1355 
1356     // Make a list of the scratch resources to delete
1357     SkTDArray<GrGpuResource*> scratchResources;
1358     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1359         GrGpuResource* resource = fPurgeableQueue.at(i);
1360         if (!resource) {
1361             continue;
1362         }
1363         SkASSERT(resource->resourcePriv().isPurgeable());
1364         if (!resource->getUniqueKey().isValid()) {
1365             *scratchResources.append() = resource;
1366         }
1367     }
1368 
1369     // Delete the scratch resources. This must be done as a separate pass
1370     // to avoid messing up the sorted order of the queue.
1371     for (int i = 0; i < scratchResources.count(); i++) {
1372         scratchResources.getAt(i)->cacheAccess().release();
1373     }
1374 
1375     this->validate();
1376 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1377     traceAfterPurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1378 #endif
1379 }
1380 
1381 // OH ISSUE: suppress release window
suppressGpuCacheBelowCertainRatio(const std::function<bool (void)> & nextFrameHasArrived)1382 void GrResourceCache::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived)
1383 {
1384     if (!fEnabled) {
1385         return;
1386     }
1387     this->purgeAsNeeded(nextFrameHasArrived);
1388 }
1389 
purgeCacheBetweenFrames(bool scratchResourcesOnly,const std::set<int> & exitedPidSet,const std::set<int> & protectedPidSet)1390 void GrResourceCache::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
1391         const std::set<int>& protectedPidSet) {
1392     HITRACE_OHOS_NAME_FMT_ALWAYS("PurgeGrResourceCache cur=%zu, limit=%zu", fBudgetedBytes, fMaxBytes);
1393     if (exitedPidSet.size() > 1) {
1394         for (int i = 1; i < fPurgeableQueue.count(); i++) {
1395             GrGpuResource* resource = fPurgeableQueue.at(i);
1396             SkASSERT(resource->resourcePriv().isPurgeable());
1397             if (exitedPidSet.find(resource->getResourceTag().fPid) != exitedPidSet.end()) {
1398                 resource->cacheAccess().release();
1399                 this->validate();
1400                 return;
1401             }
1402         }
1403     }
1404     fPurgeableQueue.sort();
1405     #ifdef NOT_BUILD_FOR_OHOS_SDK
1406     const char* softLimitPercentage = "0.9";
1407     static size_t softLimit =
1408         std::atof(OHOS::system::GetParameter("persist.sys.graphic.mem.soft_limit",
1409         softLimitPercentage).c_str()) * fMaxBytes;
1410     #else
1411     static size_t softLimit = 0.9 * fMaxBytes;
1412     #endif
1413     if (fBudgetedBytes >= softLimit) {
1414         for (int i = 0; i < fPurgeableQueue.count(); i++) {
1415             GrGpuResource* resource = fPurgeableQueue.at(i);
1416             SkASSERT(resource->resourcePriv().isPurgeable());
1417             if (protectedPidSet.find(resource->getResourceTag().fPid) == protectedPidSet.end()
1418                 && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1419                 resource->cacheAccess().release();
1420                 this->validate();
1421                 return;
1422             }
1423         }
1424     }
1425 }
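
// Worked example for the soft limit above: with fMaxBytes == 256 MiB and the default
// "persist.sys.graphic.mem.soft_limit" ratio of 0.9, softLimit is roughly 230 MiB, so
// the protected-pid-aware pass only releases anything once budgeted usage reaches 90%
// of the budget; below that, only the exited-pid scan at the top of the function (taken
// when more than one exited pid is reported) can release a resource.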
1426 
purgeUnlockedResourcesByPid(bool scratchResourceOnly,const std::set<int> & exitedPidSet)1427 void GrResourceCache::purgeUnlockedResourcesByPid(bool scratchResourceOnly, const std::set<int>& exitedPidSet) {
1428 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1429     SimpleCacheInfo simpleCacheInfo;
1430     traceBeforePurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1431 #endif
1432     // Sort the queue
1433     fPurgeableQueue.sort();
1434 
1435     // Make lists of the resources that need to be purged
1436     fThreadSafeCache->dropUniqueRefs(nullptr);
1437     SkTDArray<GrGpuResource*> exitPidResources;
1438     SkTDArray<GrGpuResource*> scratchResources;
1439     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1440         GrGpuResource* resource = fPurgeableQueue.at(i);
1441         if (!resource) {
1442             continue;
1443         }
1444         SkASSERT(resource->resourcePriv().isPurgeable());
1445         if (exitedPidSet.count(resource->getResourceTag().fPid)) {
1446             *exitPidResources.append() = resource;
1447         } else if (!resource->getUniqueKey().isValid()) {
1448             *scratchResources.append() = resource;
1449         }
1450     }
1451 
1452     // Delete the exited-pid and scratch resources. This must be done as a separate pass
1453     // to avoid messing up the sorted order of the queue.
1454     for (int i = 0; i < exitPidResources.count(); i++) {
1455         exitPidResources.getAt(i)->cacheAccess().release();
1456     }
1457     for (int i = 0; i < scratchResources.count(); i++) {
1458         scratchResources.getAt(i)->cacheAccess().release();
1459     }
1460 
1461     for (auto pid : exitedPidSet) {
1462         fExitedPid_.erase(pid);
1463     }
1464 
1465     this->validate();
1466 #if defined (SKIA_OHOS_FOR_OHOS_TRACE) && defined (SKIA_DFX_FOR_OHOS)
1467     traceAfterPurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1468 #endif
1469 }
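
// Usage sketch: when processes 1234 and 5678 have exited (the pids are hypothetical),
// release their purgeable resources along with any purgeable scratch resources and
// clear their entries from fExitedPid_:
//
//     std::set<int> exited = {1234, 5678};
//     cache->purgeUnlockedResourcesByPid(/*scratchResourceOnly=*/true, exited);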
1470 
purgeUnlockedResourcesByTag(bool scratchResourcesOnly,const GrGpuResourceTag & tag)1471 void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
1472     // Sort the queue
1473     fPurgeableQueue.sort();
1474 
1475     // Make a list of the scratch resources to delete
1476     SkTDArray<GrGpuResource*> scratchResources;
1477     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1478         GrGpuResource* resource = fPurgeableQueue.at(i);
1479         SkASSERT(resource->resourcePriv().isPurgeable());
1480         if (tag.filter(resource->getResourceTag()) && (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1481             *scratchResources.append() = resource;
1482         }
1483     }
1484 
1485     // Delete the scratch resources. This must be done as a separate pass
1486     // to avoid messing up the sorted order of the queue.
1487     for (int i = 0; i < scratchResources.count(); i++) {
1488         scratchResources.getAt(i)->cacheAccess().release();
1489     }
1490 
1491     this->validate();
1492 }
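
// Usage sketch: given a GrGpuResourceTag 'tag' obtained elsewhere (its construction is
// outside this file), release only the purgeable scratch resources that match it:
//
//     cache->purgeUnlockedResourcesByTag(/*scratchResourcesOnly=*/true, tag);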
1493 
purgeToMakeHeadroom(size_t desiredHeadroomBytes)1494 bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
1495     AutoValidate av(this);
1496     if (desiredHeadroomBytes > fMaxBytes) {
1497         return false;
1498     }
1499     if (this->wouldFit(desiredHeadroomBytes)) {
1500         return true;
1501     }
1502     fPurgeableQueue.sort();
1503 
1504     size_t projectedBudget = fBudgetedBytes;
1505     int purgeCnt = 0;
1506     for (int i = 0; i < fPurgeableQueue.count(); i++) {
1507         GrGpuResource* resource = fPurgeableQueue.at(i);
1508         if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1509             projectedBudget -= resource->gpuMemorySize();
1510         }
1511         if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
1512             purgeCnt = i + 1;
1513             break;
1514         }
1515     }
1516     if (purgeCnt == 0) {
1517         return false;
1518     }
1519 
1520     // Success! Release the resources.
1521     // Copy to array first so we don't mess with the queue.
1522     std::vector<GrGpuResource*> resources;
1523     resources.reserve(purgeCnt);
1524     for (int i = 0; i < purgeCnt; i++) {
1525         resources.push_back(fPurgeableQueue.at(i));
1526     }
1527     for (GrGpuResource* resource : resources) {
1528         resource->cacheAccess().release();
1529     }
1530     return true;
1531 }
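
// Usage sketch: before creating a resource of a known size, try to free enough budget
// for it and fall back if that is impossible (the 8 MiB figure is illustrative):
//
//     constexpr size_t kNeededBytes = 8 * 1024 * 1024;
//     if (!cache->purgeToMakeHeadroom(kNeededBytes)) {
//         // Either kNeededBytes exceeds fMaxBytes or too little is currently purgeable;
//         // the caller may flush pending work and retry.
//     }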
1532 
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)1533 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
1534 
1535     const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
1536     bool stillOverbudget = tmpByteBudget < fBytes;
1537 
1538     if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
1539         // Sort the queue
1540         fPurgeableQueue.sort();
1541 
1542         // Make a list of the scratch resources to delete
1543         SkTDArray<GrGpuResource*> scratchResources;
1544         size_t scratchByteCount = 0;
1545         for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
1546             GrGpuResource* resource = fPurgeableQueue.at(i);
1547             SkASSERT(resource->resourcePriv().isPurgeable());
1548             if (!resource->getUniqueKey().isValid()) {
1549                 *scratchResources.append() = resource;
1550                 scratchByteCount += resource->gpuMemorySize();
1551                 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
1552             }
1553         }
1554 
1555         // Delete the scratch resources. This must be done as a separate pass
1556         // to avoid messing up the sorted order of the queue
1557         for (int i = 0; i < scratchResources.count(); i++) {
1558             scratchResources.getAt(i)->cacheAccess().release();
1559         }
1560         stillOverbudget = tmpByteBudget < fBytes;
1561 
1562         this->validate();
1563     }
1564 
1565     // Purge any remaining resources in LRU order
1566     if (stillOverbudget) {
1567         const size_t cachedByteCount = fMaxBytes;
1568         fMaxBytes = tmpByteBudget;
1569         this->purgeAsNeeded();
1570         fMaxBytes = cachedByteCount;
1571     }
1572 }
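
// Usage sketch: shed roughly 16 MiB of purgeable resources, trying scratch resources
// first and only then falling back to plain LRU-order purging:
//
//     cache->purgeUnlockedResources(16 * 1024 * 1024, /*preferScratchResources=*/true);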
1573 
requestsFlush() const1574 bool GrResourceCache::requestsFlush() const {
1575     return this->overBudget() && !fPurgeableQueue.count() &&
1576            fNumBudgetedResourcesFlushWillMakePurgeable > 0;
1577 }
1578 
insertDelayedTextureUnref(GrTexture * texture)1579 void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
1580     texture->ref();
1581     uint32_t id = texture->uniqueID().asUInt();
1582     if (auto* data = fTexturesAwaitingUnref.find(id)) {
1583         data->addRef();
1584     } else {
1585         fTexturesAwaitingUnref.set(id, {texture});
1586     }
1587 }
1588 
processFreedGpuResources()1589 void GrResourceCache::processFreedGpuResources() {
1590     if (!fTexturesAwaitingUnref.count()) {
1591         return;
1592     }
1593 
1594     SkTArray<GrTextureFreedMessage> msgs;
1595     fFreedTextureInbox.poll(&msgs);
1596     for (int i = 0; i < msgs.count(); ++i) {
1597         SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
1598         uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
1599         TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
1600         // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
1601         // empty and we would have returned early above. Thus, any texture from a message should be
1602         // in the list of fTexturesAwaitingUnref.
1603         SkASSERT(info);
1604         info->unref();
1605         if (info->finished()) {
1606             fTexturesAwaitingUnref.remove(id);
1607         }
1608     }
1609 }
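
// Lifecycle implemented by the two functions above: every insertDelayedTextureUnref()
// call refs the texture and bumps the per-texture count in fTexturesAwaitingUnref;
// every GrTextureFreedMessage handled here drops one count, and once the count reaches
// zero the TextureAwaitingUnref entry is removed (its destructor is expected to release
// the refs it was holding).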
1610 
addToNonpurgeableArray(GrGpuResource * resource)1611 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
1612     int index = fNonpurgeableResources.count();
1613     *fNonpurgeableResources.append() = resource;
1614     *resource->cacheAccess().accessCacheIndex() = index;
1615 }
1616 
removeFromNonpurgeableArray(GrGpuResource * resource)1617 void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
1618     int* index = resource->cacheAccess().accessCacheIndex();
1619     // Fill the hole we will create in the array with the tail object, adjust its index, and
1620     // then pop the array
1621     GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
1622     SkASSERT(fNonpurgeableResources[*index] == resource);
1623     fNonpurgeableResources[*index] = tail;
1624     *tail->cacheAccess().accessCacheIndex() = *index;
1625     fNonpurgeableResources.pop();
1626     SkDEBUGCODE(*index = -1);
1627 }
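
// Illustration of the swap-with-last removal above. The bracketed numbers are the
// values stored through accessCacheIndex():
//
//     before removing B:  [A(0), B(1), C(2), D(3)]
//     after removing B:   [A(0), D(1), C(2)]   // D moves into B's slot, D's stored index
//                                              // becomes 1, the array is popped, and B's
//                                              // index is set to -1 in debug builds.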
1628 
getNextTimestamp()1629 uint32_t GrResourceCache::getNextTimestamp() {
1630     // If we wrap then all the existing resources will appear older than any resources that get
1631     // a timestamp after the wrap.
1632     if (0 == fTimestamp) {
1633         int count = this->getResourceCount();
1634         if (count) {
1635             // Reset all the timestamps. We sort the resources by timestamp and then assign
1636             // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
1637             // rare.
1638             SkTDArray<GrGpuResource*> sortedPurgeableResources;
1639             sortedPurgeableResources.setReserve(fPurgeableQueue.count());
1640 
1641             while (fPurgeableQueue.count()) {
1642                 *sortedPurgeableResources.append() = fPurgeableQueue.peek();
1643                 fPurgeableQueue.pop();
1644             }
1645 
1646             SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
1647                      CompareTimestamp);
1648 
1649             // Pick resources out of the purgeable and non-purgeable arrays based on lowest
1650             // timestamp and assign new timestamps.
1651             int currP = 0;
1652             int currNP = 0;
1653             while (currP < sortedPurgeableResources.count() &&
1654                    currNP < fNonpurgeableResources.count()) {
1655                 uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
1656                 uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
1657                 SkASSERT(tsP != tsNP);
1658                 if (tsP < tsNP) {
1659                     sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
1660                 } else {
1661                     // Correct the index in the nonpurgeable array stored on the resource post-sort.
1662                     *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
1663                     fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
1664                 }
1665             }
1666 
1667             // The above loop ended when we hit the end of one array. Finish the other one.
1668             while (currP < sortedPurgeableResources.count()) {
1669                 sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
1670             }
1671             while (currNP < fNonpurgeableResources.count()) {
1672                 *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
1673                 fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
1674             }
1675 
1676             // Rebuild the queue.
1677             for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
1678                 fPurgeableQueue.insert(sortedPurgeableResources[i]);
1679             }
1680 
1681             this->validate();
1682             SkASSERT(count == this->getResourceCount());
1683 
1684             // count should be the next timestamp we return.
1685             SkASSERT(fTimestamp == SkToU32(count));
1686         }
1687     }
1688     return fTimestamp++;
1689 }
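
// Worked example of the wrap-around re-timestamping above: with purgeable resources
// (already popped in timestamp order) P3, P9 and non-purgeable resources N5, N12
// (suffix = old timestamp), the merge assigns new timestamps P3->0, N5->1, P9->2,
// N12->3, and fTimestamp resumes at 4, which equals the resource count as the
// assert verifies.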
1690 
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump) const1691 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
1692     SkTDArray<GrGpuResource*> resources;
1693     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1694         *resources.append() = fNonpurgeableResources[i];
1695     }
1696     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1697         *resources.append() = fPurgeableQueue.at(i);
1698     }
1699     for (int i = 0; i < resources.count(); i++) {
1700         auto resource = resources.getAt(i);
1701         if (!resource || resource->wasDestroyed()) {
1702             continue;
1703         }
1704         resource->dumpMemoryStatistics(traceMemoryDump);
1705     }
1706 }
1707 
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump,const GrGpuResourceTag & tag) const1708 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
1709     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1710         if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
1711             fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
1712         }
1713     }
1714     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1715         if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
1716             fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
1717         }
1718     }
1719 }
1720 
1721 #if GR_CACHE_STATS
getStats(Stats * stats) const1722 void GrResourceCache::getStats(Stats* stats) const {
1723     stats->reset();
1724 
1725     stats->fTotal = this->getResourceCount();
1726     stats->fNumNonPurgeable = fNonpurgeableResources.count();
1727     stats->fNumPurgeable = fPurgeableQueue.count();
1728 
1729     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1730         stats->update(fNonpurgeableResources[i]);
1731     }
1732     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1733         stats->update(fPurgeableQueue.at(i));
1734     }
1735 }
1736 
1737 #if GR_TEST_UTILS
dumpStats(SkString * out) const1738 void GrResourceCache::dumpStats(SkString* out) const {
1739     this->validate();
1740 
1741     Stats stats;
1742 
1743     this->getStats(&stats);
1744 
1745     float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;
1746 
1747     out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
1748     out->appendf("\t\tEntry Count: current %d"
1749                  " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
1750                  stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
1751                  stats.fScratch, fHighWaterCount);
1752     out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
1753                  SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
1754                  SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
1755 }
1756 
dumpStatsKeyValuePairs(SkTArray<SkString> * keys,SkTArray<double> * values) const1757 void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
1758                                              SkTArray<double>* values) const {
1759     this->validate();
1760 
1761     Stats stats;
1762     this->getStats(&stats);
1763 
1764     keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
1765 }
1766 #endif // GR_TEST_UTILS
1767 #endif // GR_CACHE_STATS
1768 
1769 #ifdef SK_DEBUG
validate() const1770 void GrResourceCache::validate() const {
1771     // Reduce the frequency of validations for large resource counts.
1772     static SkRandom gRandom;
1773     int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
1774     if (~mask && (gRandom.nextU() & mask)) {
1775         return;
1776     }
1777 
1778     struct Stats {
1779         size_t fBytes;
1780         int fBudgetedCount;
1781         size_t fBudgetedBytes;
1782         int fLocked;
1783         int fScratch;
1784         int fCouldBeScratch;
1785         int fContent;
1786         const ScratchMap* fScratchMap;
1787         const UniqueHash* fUniqueHash;
1788 
1789         Stats(const GrResourceCache* cache) {
1790             memset(this, 0, sizeof(*this));
1791             fScratchMap = &cache->fScratchMap;
1792             fUniqueHash = &cache->fUniqueHash;
1793         }
1794 
1795         void update(GrGpuResource* resource) {
1796             fBytes += resource->gpuMemorySize();
1797 
1798             if (!resource->resourcePriv().isPurgeable()) {
1799                 ++fLocked;
1800             }
1801 
1802             const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
1803             const GrUniqueKey& uniqueKey = resource->getUniqueKey();
1804 
1805             if (resource->cacheAccess().isUsableAsScratch()) {
1806                 SkASSERT(!uniqueKey.isValid());
1807                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
1808                 SkASSERT(!resource->cacheAccess().hasRef());
1809                 ++fScratch;
1810                 SkASSERT(fScratchMap->countForKey(scratchKey));
1811                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1812             } else if (scratchKey.isValid()) {
1813                 SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
1814                          uniqueKey.isValid() || resource->cacheAccess().hasRef());
1815                 SkASSERT(!resource->resourcePriv().refsWrappedObjects());
1816                 SkASSERT(!fScratchMap->has(resource, scratchKey));
1817             }
1818             if (uniqueKey.isValid()) {
1819                 ++fContent;
1820                 SkASSERT(fUniqueHash->find(uniqueKey) == resource);
1821                 SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
1822                          resource->resourcePriv().refsWrappedObjects());
1823             }
1824 
1825             if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
1826                 ++fBudgetedCount;
1827                 fBudgetedBytes += resource->gpuMemorySize();
1828             }
1829         }
1830     };
1831 
1832     {
1833         int count = 0;
1834         fScratchMap.foreach([&](const GrGpuResource& resource) {
1835             SkASSERT(resource.cacheAccess().isUsableAsScratch());
1836             count++;
1837         });
1838         SkASSERT(count == fScratchMap.count());
1839     }
1840 
1841     Stats stats(this);
1842     size_t purgeableBytes = 0;
1843     int numBudgetedResourcesFlushWillMakePurgeable = 0;
1844 
1845     for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1846         SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
1847                  fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
1848         SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
1849         SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
1850         if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
1851             !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
1852             fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
1853             ++numBudgetedResourcesFlushWillMakePurgeable;
1854         }
1855         stats.update(fNonpurgeableResources[i]);
1856     }
1857     for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1858         SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
1859         SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
1860         SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
1861         stats.update(fPurgeableQueue.at(i));
1862         purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
1863     }
1864 
1865     SkASSERT(fCount == this->getResourceCount());
1866     SkASSERT(fBudgetedCount <= fCount);
1867     SkASSERT(fBudgetedBytes <= fBytes);
1868     SkASSERT(stats.fBytes == fBytes);
1869     SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
1870              numBudgetedResourcesFlushWillMakePurgeable);
1871     SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
1872     SkASSERT(stats.fBudgetedCount == fBudgetedCount);
1873     SkASSERT(purgeableBytes == fPurgeableBytes);
1874 #if GR_CACHE_STATS
1875     SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
1876     SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
1877     SkASSERT(fBytes <= fHighWaterBytes);
1878     SkASSERT(fCount <= fHighWaterCount);
1879     SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
1880     SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
1881 #endif
1882     SkASSERT(stats.fContent == fUniqueHash.count());
1883     SkASSERT(stats.fScratch == fScratchMap.count());
1884 
1885     // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
1886     // calls. This will be fixed when subresource registration is explicit.
1887     // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
1888     // SkASSERT(!overBudget || locked == count || fPurging);
1889 }
1890 #endif // SK_DEBUG
1891 
isInCache(const GrGpuResource * resource) const1892 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
1893     int index = *resource->cacheAccess().accessCacheIndex();
1894     if (index < 0) {
1895         return false;
1896     }
1897     if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1898         return true;
1899     }
1900     if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1901         return true;
1902     }
1903     SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
1904     return false;
1905 }
1906 
isInPurgeableCache(const GrGpuResource * resource) const1907 bool GrResourceCache::isInPurgeableCache(const GrGpuResource* resource) const {
1908     int index = *resource->cacheAccess().accessCacheIndex();
1909     if (index < 0) {
1910         return false;
1911     }
1912     if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1913         return true;
1914     }
1915     SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1916     return false;
1917 }
1918 
isInNonpurgeableCache(const GrGpuResource * resource) const1919 bool GrResourceCache::isInNonpurgeableCache(const GrGpuResource* resource) const {
1920     int index = *resource->cacheAccess().accessCacheIndex();
1921     if (index < 0) {
1922         return false;
1923     }
1924     if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1925         return true;
1926     }
1927     SkDEBUGFAIL("OHOS Resource index should be -1 or the resource should be in the cache.");
1928     return false;
1929 }
1930 
1931 #if GR_TEST_UTILS
1932 
countUniqueKeysWithTag(const char * tag) const1933 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
1934     int count = 0;
1935     fUniqueHash.foreach([&](const GrGpuResource& resource){
1936         if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
1937             ++count;
1938         }
1939     });
1940     return count;
1941 }
1942 
changeTimestamp(uint32_t newTimestamp)1943 void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
1944     fTimestamp = newTimestamp;
1945 }
1946 
1947 #endif // GR_TEST_UTILS
1948