1 /*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/GrResourceCache.h"
9 #include <atomic>
10 #include <ctime>
11 #include <vector>
12 #include <map>
13 #include <sstream>
14 #ifdef NOT_BUILD_FOR_OHOS_SDK
15 #include <parameters.h>
16 #endif
17 #include "include/core/SkString.h"
18 #include "include/gpu/GrDirectContext.h"
19 #include "include/private/GrSingleOwner.h"
20 #include "include/private/SkTo.h"
21 #include "include/utils/SkRandom.h"
22 #include "src/core/SkMessageBus.h"
23 #include "src/core/SkOpts.h"
24 #include "src/core/SkScopeExit.h"
25 #include "src/core/SkTSort.h"
26 #include "src/gpu/GrCaps.h"
27 #include "src/gpu/GrDirectContextPriv.h"
28 #include "src/gpu/GrGpuResourceCacheAccess.h"
29 #include "src/gpu/GrProxyProvider.h"
30 #ifdef SKIA_OHOS
31 #include "src/gpu/GrPerfMonitorReporter.h"
32 #endif
33 #include "src/gpu/GrTexture.h"
34 #include "src/gpu/GrTextureProxyCacheAccess.h"
35 #include "src/gpu/GrThreadSafeCache.h"
36 #include "src/gpu/GrTracing.h"
37 #include "src/gpu/SkGr.h"
38 #ifdef SK_VULKAN
39 #include "src/gpu/vk/GrVkImage.h"
40 #endif
41
42 DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);
43
44 DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);
45
46 #define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)
47
48 //////////////////////////////////////////////////////////////////////////////
49
GenerateResourceType()50 GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
51 static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
52
53 int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
54 if (type > SkTo<int32_t>(UINT16_MAX)) {
55 SK_ABORT("Too many Resource Types");
56 }
57
58 return static_cast<ResourceType>(type);
59 }
60
GenerateDomain()61 GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
62 static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
63
64 int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
65 if (domain > SkTo<int32_t>(UINT16_MAX)) {
66 SK_ABORT("Too many GrUniqueKey Domains");
67 }
68
69 return static_cast<Domain>(domain);
70 }
71
// Hashes a resource key's raw 32-bit words using Skia's optimized hash routine.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
75
76 //////////////////////////////////////////////////////////////////////////////
77
// RAII helper that runs fCache->validate() both on construction and when the
// enclosing scope exits, bracketing a mutation with invariant checks.
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};
85
86 //////////////////////////////////////////////////////////////////////////////
87
// Default state: no texture held, no pending unrefs.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
89
// Wraps `texture` with a single pending unref owed to it.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}
92
TextureAwaitingUnref(TextureAwaitingUnref && that)93 inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
94 fTexture = std::exchange(that.fTexture, nullptr);
95 fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
96 }
97
operator =(TextureAwaitingUnref && that)98 inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
99 TextureAwaitingUnref&& that) {
100 fTexture = std::exchange(that.fTexture, nullptr);
101 fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
102 return *this;
103 }
104
// Issues every outstanding unref on destruction so the texture's ref count
// is balanced even if individual unref() calls never arrived.
inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}
112
addRef()113 inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }
114
unref()115 inline void GrResourceCache::TextureAwaitingUnref::unref() {
116 SkASSERT(fNumUnrefs > 0);
117 fTexture->unref();
118 --fNumUnrefs;
119 }
120
finished()121 inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
122
123 //////////////////////////////////////////////////////////////////////////////
124
// Constructs the cache for one direct context.
// @param singleOwner     ownership guard checked by ASSERT_SINGLE_OWNER.
// @param owningContextID identifies the owning GrDirectContext (must be valid).
// @param familyID        context family id used for the invalidation inbox.
GrResourceCache::GrResourceCache(GrSingleOwner* singleOwner,
                                 GrDirectContext::DirectContextID owningContextID,
                                 uint32_t familyID)
        : fInvalidUniqueKeyInbox(familyID)
        , fFreedTextureInbox(owningContextID)
        , fOwningContextID(owningContextID)
        , fContextUniqueID(familyID)
        , fSingleOwner(singleOwner) {
    SkASSERT(owningContextID.isValid());
    SkASSERT(familyID != SK_InvalidUniqueID);
#ifdef NOT_BUILD_FOR_OHOS_SDK
    // On full OHOS builds the async-free tunables come from system parameters;
    // function-local statics so they are read exactly once per process.
    static int overtimeDuration = std::atoi(
        OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_overtime", "600")
        .c_str());
    static double maxBytesRate = std::atof(
        OHOS::system::GetParameter("persist.sys.graphic.mem.async_free_cache_max_rate", "0.9")
        .c_str());
#else
    // SDK builds use the fixed defaults (600s / 0.9).
    static int overtimeDuration = 600;
    static double maxBytesRate = 0.9;
#endif
    fMaxBytesRate = maxBytesRate;
    fOvertimeDuration = overtimeDuration;
}
149
// Releases every cached resource before the cache itself goes away.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
153
// Sets the budgeted-bytes ceiling and immediately purges down to it.
void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}
158
#ifdef SKIA_DFX_FOR_OHOS
// Bytes per megabyte; used when rendering sizes in the cache-info strings below.
static constexpr int MB = 1024 * 1024;

#ifdef SKIA_OHOS
// Debug switch: when the system parameter "sys.graphic.skia.cache.debug" is "1",
// purge tracing emits the full cacheInfo() dump instead of the compact summary.
bool GrResourceCache::purgeUnlocakedResTraceEnabled_ =
    std::atoi((OHOS::system::GetParameter("sys.graphic.skia.cache.debug", "0").c_str())) == 1;
#endif
166
// Appends a human-readable, one-line-per-field dump of cacheInfo() to `out`.
// The aggregate string is split on ';' after stripping its leading '['.
void GrResourceCache::dumpInfo(SkString* out) {
    if (out == nullptr) {
        SkDebugf("OHOS GrResourceCache::dumpInfo outPtr is nullptr!");
        return;
    }
    auto info = cacheInfo();
    constexpr uint8_t STEP_INDEX = 1;  // skip the leading '[' of the info string
    SkTArray<SkString> lines;
    SkStrSplit(info.substr(STEP_INDEX, info.length() - STEP_INDEX).c_str(), ";", &lines);
    for (int i = 0; i < lines.size(); ++i) {
        out->appendf(" %s\n", lines[i].c_str());
    }
}
180
// Builds a single ';'-separated string describing the cache's current state:
// counts, byte totals (raw and in MB), the purgeable-queue breakdown and the
// non-purgeable breakdown. Consumed by dumpInfo() and the DFX trace helpers.
std::string GrResourceCache::cacheInfo()
{
    auto fPurgeableQueueInfoStr = cacheInfoPurgeableQueue();
    auto fNonpurgeableResourcesInfoStr = cacheInfoNoPurgeableQueue();

    std::ostringstream cacheInfoStream;
    cacheInfoStream << "[fPurgeableQueueInfoStr.count : " << fPurgeableQueue.count()
        << "; fNonpurgeableResources.count : " << fNonpurgeableResources.count()
        << "; fBudgetedBytes : " << fBudgetedBytes
        << "(" << static_cast<size_t>(fBudgetedBytes / MB)
        << " MB) / " << fMaxBytes
        << "(" << static_cast<size_t>(fMaxBytes / MB)
        << " MB); fBudgetedCount : " << fBudgetedCount
        << "; fBytes : " << fBytes
        << "(" << static_cast<size_t>(fBytes / MB)
        << " MB); fPurgeableBytes : " << fPurgeableBytes
        << "(" << static_cast<size_t>(fPurgeableBytes / MB)
        << " MB); fAllocImageBytes : " << fAllocImageBytes
        << "(" << static_cast<size_t>(fAllocImageBytes / MB)
        << " MB); fAllocBufferBytes : " << fAllocBufferBytes
        << "(" << static_cast<size_t>(fAllocBufferBytes / MB)
        << " MB); fTimestamp : " << fTimestamp
        << "; " << fPurgeableQueueInfoStr << "; " << fNonpurgeableResourcesInfoStr;
    return cacheInfoStream.str();
}
206
207 #ifdef SKIA_OHOS
// Captures the pre-purge cache state for `method`. With verbose tracing
// enabled this opens a trace section holding the full cacheInfo() string;
// otherwise it snapshots the counters into `simpleCacheInfo` so
// traceAfterPurgeUnlockRes() can report a before/after comparison.
void GrResourceCache::traceBeforePurgeUnlockRes(const std::string& method, SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        StartTrace(HITRACE_TAG_GRAPHIC_AGP, method + " begin cacheInfo = " + cacheInfo());
    } else {
        simpleCacheInfo.fPurgeableQueueCount = fPurgeableQueue.count();
        simpleCacheInfo.fNonpurgeableResourcesCount = fNonpurgeableResources.count();
        simpleCacheInfo.fPurgeableBytes = fPurgeableBytes;
        simpleCacheInfo.fBudgetedCount = fBudgetedCount;
        simpleCacheInfo.fBudgetedBytes = fBudgetedBytes;
        simpleCacheInfo.fAllocImageBytes = fAllocImageBytes;
        simpleCacheInfo.fAllocBufferBytes = fAllocBufferBytes;
    }
}
222
// Emits the post-purge trace for `method`: the full cacheInfo() dump when
// verbose tracing is on (and closes the section opened by the Before call),
// or a compact before/after comparison built from `simpleCacheInfo`.
void GrResourceCache::traceAfterPurgeUnlockRes(const std::string& method, const SimpleCacheInfo& simpleCacheInfo)
{
    if (purgeUnlocakedResTraceEnabled_) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s", method.c_str(), cacheInfo().c_str());
        FinishTrace(HITRACE_TAG_GRAPHIC_AGP);
    } else {
        HITRACE_OHOS_NAME_FMT_ALWAYS("%s end cacheInfo = %s",
            method.c_str(), cacheInfoComparison(simpleCacheInfo).c_str());
    }
}
233
// Formats "snapshot / current" pairs for the main cache counters, where the
// snapshot values come from traceBeforePurgeUnlockRes().
std::string GrResourceCache::cacheInfoComparison(const SimpleCacheInfo& simpleCacheInfo)
{
    std::ostringstream cacheInfoComparison;
    cacheInfoComparison << "PurgeableCount : " << simpleCacheInfo.fPurgeableQueueCount
        << " / " << fPurgeableQueue.count()
        << "; NonpurgeableCount : " << simpleCacheInfo.fNonpurgeableResourcesCount
        << " / " << fNonpurgeableResources.count()
        << "; PurgeableBytes : " << simpleCacheInfo.fPurgeableBytes << " / " << fPurgeableBytes
        << "; BudgetedCount : " << simpleCacheInfo.fBudgetedCount << " / " << fBudgetedCount
        << "; BudgetedBytes : " << simpleCacheInfo.fBudgetedBytes << " / " << fBudgetedBytes
        << "; AllocImageBytes : " << simpleCacheInfo.fAllocImageBytes << " / " << fAllocImageBytes
        << "; AllocBufferBytes : " << simpleCacheInfo.fAllocBufferBytes << " / " << fAllocBufferBytes;
    return cacheInfoComparison.str();
}
248 #endif // SKIA_OHOS
249
// Summarizes the purgeable queue, bucketing resources three ways by their
// resource tag: by window/node id (fWid), by typeid (fFid, with a per-pid
// sub-bucket), and an "unknown" bucket for untagged resources.
// Vulkan texture-usage images are skipped — they are accounted elsewhere.
std::string GrResourceCache::cacheInfoPurgeableQueue()
{
    // Per-wid aggregates: bytes, count, name and owning pid.
    std::map<uint64_t, size_t> purgSizeInfoWid;
    std::map<uint64_t, int> purgCountInfoWid;
    std::map<uint64_t, std::string> purgNameInfoWid;
    std::map<uint64_t, int> purgPidInfoWid;

    // Per-pid aggregates (only for resources that also carry an fFid).
    std::map<uint32_t, size_t> purgSizeInfoPid;
    std::map<uint32_t, int> purgCountInfoPid;
    std::map<uint32_t, std::string> purgNameInfoPid;

    // Per-fid (typeid) aggregates.
    std::map<uint32_t, size_t> purgSizeInfoFid;
    std::map<uint32_t, int> purgCountInfoFid;
    std::map<uint32_t, std::string> purgNameInfoFid;

    int purgCountUnknown = 0;
    size_t purgSizeUnknown = 0;

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        auto resource = fPurgeableQueue.at(i);
#ifdef SK_VULKAN
        // Skip VkImages with texture usage; they are not counted in this report.
        if (std::strcmp(resource->getResourceType(), "VkImage") == 0) {
            auto vkimage = static_cast<GrVkImage*>(resource);
            if (vkimage->supportedUsages() & GrAttachment::UsageFlags::kTexture) {
                continue;
            }
        }
#endif
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
            if (resourceTag.fPid != 0) {
                updatePurgeablePidMap(resource, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
            }
        } else {
            purgCountUnknown++;
            purgSizeUnknown += resource->gpuMemorySize();
        }
    }

    // Render each non-empty bucket; the update*Info helpers close the '['.
    std::string infoStr;
    if (purgSizeInfoWid.size() > 0) {
        infoStr += ";PurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, purgNameInfoWid, purgSizeInfoWid, purgPidInfoWid, purgCountInfoWid);
    }
    if (purgSizeInfoPid.size() > 0) {
        infoStr += ";PurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, purgNameInfoPid, purgSizeInfoPid, purgCountInfoPid);
    }
    if (purgSizeInfoFid.size() > 0) {
        infoStr += ";PurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, purgNameInfoFid, purgSizeInfoFid, purgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";PurgeableInfo_Unknown:", purgCountUnknown, purgSizeUnknown);
    return infoStr;
}
308
// Mirror of cacheInfoPurgeableQueue() for the non-purgeable resource array:
// same wid/fid/pid/unknown bucketing, same output format with a
// "NonPurgeableInfo_" prefix. Null entries in the array are skipped.
std::string GrResourceCache::cacheInfoNoPurgeableQueue()
{
    std::map<uint64_t, size_t> noPurgSizeInfoWid;
    std::map<uint64_t, int> noPurgCountInfoWid;
    std::map<uint64_t, std::string> noPurgNameInfoWid;
    std::map<uint64_t, int> noPurgPidInfoWid;

    std::map<uint32_t, size_t> noPurgSizeInfoPid;
    std::map<uint32_t, int> noPurgCountInfoPid;
    std::map<uint32_t, std::string> noPurgNameInfoPid;

    std::map<uint32_t, size_t> noPurgSizeInfoFid;
    std::map<uint32_t, int> noPurgCountInfoFid;
    std::map<uint32_t, std::string> noPurgNameInfoFid;

    int noPurgCountUnknown = 0;
    size_t noPurgSizeUnknown = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        auto resource = fNonpurgeableResources[i];
        if (resource == nullptr) {
            continue;
        }
#ifdef SK_VULKAN
        // Skip VkImages with texture usage; they are not counted in this report.
        if (std::strcmp(resource->getResourceType(), "VkImage") == 0) {
            auto vkimage = static_cast<GrVkImage*>(resource);
            if (vkimage->supportedUsages() & GrAttachment::UsageFlags::kTexture) {
                continue;
            }
        }
#endif
        auto resourceTag = resource->getResourceTag();
        if (resourceTag.fWid != 0) {
            updatePurgeableWidMap(resource, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
        } else if (resourceTag.fFid != 0) {
            updatePurgeableFidMap(resource, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
            if (resourceTag.fPid != 0) {
                updatePurgeablePidMap(resource, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
            }
        } else {
            noPurgCountUnknown++;
            noPurgSizeUnknown += resource->gpuMemorySize();
        }
    }

    std::string infoStr;
    if (noPurgSizeInfoWid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Node:[";
        updatePurgeableWidInfo(infoStr, noPurgNameInfoWid, noPurgSizeInfoWid, noPurgPidInfoWid, noPurgCountInfoWid);
    }
    if (noPurgSizeInfoPid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Pid:[";
        updatePurgeablePidInfo(infoStr, noPurgNameInfoPid, noPurgSizeInfoPid, noPurgCountInfoPid);
    }
    if (noPurgSizeInfoFid.size() > 0) {
        infoStr += ";NonPurgeableInfo_Fid:[";
        updatePurgeableFidInfo(infoStr, noPurgNameInfoFid, noPurgSizeInfoFid, noPurgCountInfoFid);
    }
    updatePurgeableUnknownInfo(infoStr, ";NonPurgeableInfo_Unknown:", noPurgCountUnknown, noPurgSizeUnknown);
    return infoStr;
}
370
updatePurgeableWidMap(GrGpuResource * resource,std::map<uint64_t,std::string> & nameInfoWid,std::map<uint64_t,size_t> & sizeInfoWid,std::map<uint64_t,int> & pidInfoWid,std::map<uint64_t,int> & countInfoWid)371 void GrResourceCache::updatePurgeableWidMap(GrGpuResource* resource,
372 std::map<uint64_t, std::string>& nameInfoWid,
373 std::map<uint64_t, size_t>& sizeInfoWid,
374 std::map<uint64_t, int>& pidInfoWid,
375 std::map<uint64_t, int>& countInfoWid)
376 {
377 auto resourceTag = resource->getResourceTag();
378 auto it = sizeInfoWid.find(resourceTag.fWid);
379 if (it != sizeInfoWid.end()) {
380 sizeInfoWid[resourceTag.fWid] = it->second + resource->gpuMemorySize();
381 countInfoWid[resourceTag.fWid]++;
382 } else {
383 sizeInfoWid[resourceTag.fWid] = resource->gpuMemorySize();
384 nameInfoWid[resourceTag.fWid] = resourceTag.fName;
385 pidInfoWid[resourceTag.fWid] = resourceTag.fPid;
386 countInfoWid[resourceTag.fWid] = 1;
387 }
388 }
389
updatePurgeablePidMap(GrGpuResource * resource,std::map<uint32_t,std::string> & nameInfoPid,std::map<uint32_t,size_t> & sizeInfoPid,std::map<uint32_t,int> & countInfoPid)390 void GrResourceCache::updatePurgeablePidMap(GrGpuResource* resource,
391 std::map<uint32_t, std::string>& nameInfoPid,
392 std::map<uint32_t, size_t>& sizeInfoPid,
393 std::map<uint32_t, int>& countInfoPid)
394 {
395 auto resourceTag = resource->getResourceTag();
396 auto it = sizeInfoPid.find(resourceTag.fPid);
397 if (it != sizeInfoPid.end()) {
398 sizeInfoPid[resourceTag.fPid] = it->second + resource->gpuMemorySize();
399 countInfoPid[resourceTag.fPid]++;
400 } else {
401 sizeInfoPid[resourceTag.fPid] = resource->gpuMemorySize();
402 nameInfoPid[resourceTag.fPid] = resourceTag.fName;
403 countInfoPid[resourceTag.fPid] = 1;
404 }
405 }
406
updatePurgeableFidMap(GrGpuResource * resource,std::map<uint32_t,std::string> & nameInfoFid,std::map<uint32_t,size_t> & sizeInfoFid,std::map<uint32_t,int> & countInfoFid)407 void GrResourceCache::updatePurgeableFidMap(GrGpuResource* resource,
408 std::map<uint32_t, std::string>& nameInfoFid,
409 std::map<uint32_t, size_t>& sizeInfoFid,
410 std::map<uint32_t, int>& countInfoFid)
411 {
412 auto resourceTag = resource->getResourceTag();
413 auto it = sizeInfoFid.find(resourceTag.fFid);
414 if (it != sizeInfoFid.end()) {
415 sizeInfoFid[resourceTag.fFid] = it->second + resource->gpuMemorySize();
416 countInfoFid[resourceTag.fFid]++;
417 } else {
418 sizeInfoFid[resourceTag.fFid] = resource->gpuMemorySize();
419 nameInfoFid[resourceTag.fFid] = resourceTag.fName;
420 countInfoFid[resourceTag.fFid] = 1;
421 }
422 }
423
updatePurgeableWidInfo(std::string & infoStr,std::map<uint64_t,std::string> & nameInfoWid,std::map<uint64_t,size_t> & sizeInfoWid,std::map<uint64_t,int> & pidInfoWid,std::map<uint64_t,int> & countInfoWid)424 void GrResourceCache::updatePurgeableWidInfo(std::string& infoStr,
425 std::map<uint64_t, std::string>& nameInfoWid,
426 std::map<uint64_t, size_t>& sizeInfoWid,
427 std::map<uint64_t, int>& pidInfoWid,
428 std::map<uint64_t, int>& countInfoWid)
429 {
430 for (auto it = sizeInfoWid.begin(); it != sizeInfoWid.end(); it++) {
431 infoStr += "[" + nameInfoWid[it->first] +
432 ",pid=" + std::to_string(pidInfoWid[it->first]) +
433 ",NodeId=" + std::to_string(it->first) +
434 ",count=" + std::to_string(countInfoWid[it->first]) +
435 ",size=" + std::to_string(it->second) +
436 "(" + std::to_string(it->second / MB) + " MB)],";
437 }
438 infoStr += ']';
439 }
440
updatePurgeablePidInfo(std::string & infoStr,std::map<uint32_t,std::string> & nameInfoPid,std::map<uint32_t,size_t> & sizeInfoPid,std::map<uint32_t,int> & countInfoPid)441 void GrResourceCache::updatePurgeablePidInfo(std::string& infoStr,
442 std::map<uint32_t, std::string>& nameInfoPid,
443 std::map<uint32_t, size_t>& sizeInfoPid,
444 std::map<uint32_t, int>& countInfoPid)
445 {
446 for (auto it = sizeInfoPid.begin(); it != sizeInfoPid.end(); it++) {
447 infoStr += "[" + nameInfoPid[it->first] +
448 ",pid=" + std::to_string(it->first) +
449 ",count=" + std::to_string(countInfoPid[it->first]) +
450 ",size=" + std::to_string(it->second) +
451 "(" + std::to_string(it->second / MB) + " MB)],";
452 }
453 infoStr += ']';
454 }
455
updatePurgeableFidInfo(std::string & infoStr,std::map<uint32_t,std::string> & nameInfoFid,std::map<uint32_t,size_t> & sizeInfoFid,std::map<uint32_t,int> & countInfoFid)456 void GrResourceCache::updatePurgeableFidInfo(std::string& infoStr,
457 std::map<uint32_t, std::string>& nameInfoFid,
458 std::map<uint32_t, size_t>& sizeInfoFid,
459 std::map<uint32_t, int>& countInfoFid)
460 {
461 for (auto it = sizeInfoFid.begin(); it != sizeInfoFid.end(); it++) {
462 infoStr += "[" + nameInfoFid[it->first] +
463 ",typeid=" + std::to_string(it->first) +
464 ",count=" + std::to_string(countInfoFid[it->first]) +
465 ",size=" + std::to_string(it->second) +
466 "(" + std::to_string(it->second / MB) + " MB)],";
467 }
468 infoStr += ']';
469 }
470
updatePurgeableUnknownInfo(std::string & infoStr,const std::string & unknownPrefix,const int countUnknown,const size_t sizeUnknown)471 void GrResourceCache::updatePurgeableUnknownInfo(
472 std::string& infoStr, const std::string& unknownPrefix, const int countUnknown, const size_t sizeUnknown)
473 {
474 if (countUnknown > 0) {
475 infoStr += unknownPrefix +
476 "[count=" + std::to_string(countUnknown) +
477 ",size=" + std::to_string(sizeUnknown) +
478 "(" + std::to_string(sizeUnknown / MB) + "MB)]";
479 }
480 }
481 #endif
482
// Adds a brand-new (non-purgeable, not-yet-cached) resource to the cache,
// updates byte/count bookkeeping (including OHOS per-pid accounting), and
// purges if the budget is exceeded.
void GrResourceCache::insertResource(GrGpuResource* resource)
{
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;

    // OH ISSUE: memory count
    // Track per-pid byte totals for real allocations; when a pid crosses
    // fMemoryControl_, fire the overflow callback once and remember the pid.
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize += size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize >= fMemoryControl_ && fExitedPid_.find(pid) == fExitedPid_.end() && fMemoryOverflowCallback_) {
            fMemoryOverflowCallback_(pid, pidSize, true);
            fExitedPid_.insert(pid);
            SkDebugf("OHOS resource overflow! pid[%{public}d], size[%{public}zu]", pid, pidSize);
#ifdef SKIA_OHOS
            // NOTE(review): "%u" is paired with a size_t argument here — on LP64 this is a
            // mismatched format; confirm the trace macro's handling or switch to "%zu".
            HITRACE_OHOS_NAME_FMT_ALWAYS("OHOS gpu resource overflow: pid(%u), size:(%u)", pid, pidSize);
#endif
        }
    }

#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    SkASSERT(!resource->cacheAccess().isUsableAsScratch());
#ifdef SKIA_OHOS
    // Over budget: bracket the purge with DFX tracing so the before/after
    // cache state is visible in traces.
    if (fBudgetedBytes >= fMaxBytes) {
        HITRACE_OHOS_NAME_FMT_ALWAYS("cache over fBudgetedBytes:(%u), fMaxBytes:(%u)", fBudgetedBytes, fMaxBytes);
#ifdef SKIA_DFX_FOR_OHOS
        SimpleCacheInfo simpleCacheInfo;
        traceBeforePurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
        this->purgeAsNeeded();
#ifdef SKIA_DFX_FOR_OHOS
        traceAfterPurgeUnlockRes("insertResource", simpleCacheInfo);
#endif
    } else {
        this->purgeAsNeeded();
    }
#else
    this->purgeAsNeeded();
#endif
}
550
// Removes a resource from whichever structure holds it (purgeable queue or
// non-purgeable array) and unwinds all bookkeeping insertResource() added.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;

    // OH ISSUE: memory count
    // Undo the per-pid accounting; drop the entry entirely once it hits zero.
    auto pid = resource->getResourceTag().fPid;
    if (pid && resource->isRealAlloc()) {
        auto& pidSize = fBytesOfPid[pid];
        pidSize -= size;
        fUpdatedBytesOfPid[pid] = pidSize;
        if (pidSize == 0) {
            fBytesOfPid.erase(pid);
        }
    }

    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    // Drop any key-based lookups that still point at this resource.
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
593
// Abandons every resource without returning them to the GPU backend (used
// when the underlying context is lost). Afterwards the cache must be empty.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    // abandon() removes the resource from the array, so always take the back.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    fThreadSafeCache->dropAllRefs();

    // Everything should now be accounted for as gone.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
625
// Releases every resource back to the GPU backend (normal teardown path,
// unlike abandonAll()). Afterwards the cache must be empty.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    fThreadSafeCache->dropAllRefs();

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    SkASSERT(fThreadSafeCache); // better have called setThreadSafeCache too

    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    // release() removes the resource from the array, so always take the back.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    // Everything should now be accounted for as gone.
    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
666
// Releases every cached resource whose tag matches `tag`. Matching resources
// are first collected (releasing in place would invalidate the iteration),
// their unique keys are invalidated through the proxy provider, and only then
// are they released.
void GrResourceCache::releaseByTag(const GrGpuResourceTag& tag) {
    AutoValidate av(this);
    this->processFreedGpuResources();
    SkASSERT(fProxyProvider); // better have called setProxyProvider
    std::vector<GrGpuResource*> recycleVector;
    for (int i = 0; i < fNonpurgeableResources.count(); i++) {
        GrGpuResource* resource = fNonpurgeableResources[i];
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (tag.filter(resource->getResourceTag())) {
            recycleVector.emplace_back(resource);
            if (resource->getUniqueKey().isValid()) {
                fProxyProvider->processInvalidUniqueKey(resource->getUniqueKey(), nullptr,
                    GrProxyProvider::InvalidateGPUResource::kNo);
            }
        }
    }

    // Deferred release of everything that matched.
    for (auto resource : recycleVector) {
        SkASSERT(!resource->wasDestroyed());
        resource->cacheAccess().release();
    }
}
699
// Pushes `tag` as the current resource tag. NOTE: an *invalid* tag does not
// push — it pops the current tag instead (callers use an invalid tag as a
// "restore previous" signal).
void GrResourceCache::setCurrentGrResourceTag(const GrGpuResourceTag& tag) {
    if (tag.isGrTagValid()) {
        grResourceTagCacheStack.push(tag);
        return;
    }
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}
709
// Pops the current resource tag, if any (no-op on an empty stack).
void GrResourceCache::popGrResourceTag()
{
    if (!grResourceTagCacheStack.empty()) {
        grResourceTagCacheStack.pop();
    }
}
716
getCurrentGrResourceTag() const717 GrGpuResourceTag GrResourceCache::getCurrentGrResourceTag() const {
718 if (grResourceTagCacheStack.empty()) {
719 return{};
720 }
721 return grResourceTagCacheStack.top();
722 }
723
getAllGrGpuResourceTags() const724 std::set<GrGpuResourceTag> GrResourceCache::getAllGrGpuResourceTags() const {
725 std::set<GrGpuResourceTag> result;
726 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
727 auto tag = fNonpurgeableResources[i]->getResourceTag();
728 result.insert(tag);
729 }
730 return result;
731 }
732
#ifdef SKIA_OHOS
// OH ISSUE: set purgeable resource max count limit.
// Caps how many resources the purgeable queue may hold.
void GrResourceCache::setPurgeableResourceLimit(int purgeableMaxCount)
{
    fPurgeableMaxCount = purgeableMaxCount;
}
#endif
740
// OH ISSUE: get the memory information of the updated pid.
// Hands the accumulated pid->bytes deltas to the caller by swapping with
// `out`. Callers should pass an empty map: whatever `out` held becomes the
// new accumulation map.
void GrResourceCache::getUpdatedMemoryMap(std::unordered_map<int32_t, size_t> &out)
{
    fUpdatedBytesOfPid.swap(out);
}
746
// OH ISSUE: init gpu memory limit.
// Installs the per-pid overflow callback and byte threshold. Only the first
// call takes effect; later calls are silently ignored.
void GrResourceCache::initGpuMemoryLimit(MemoryOverflowCalllback callback, uint64_t size)
{
    if (fMemoryOverflowCallback_ == nullptr) {
        fMemoryOverflowCallback_ = callback;
        fMemoryControl_ = size;
    }
}
755
756 // OH ISSUE: check whether the PID is abnormal.
isPidAbnormal() const757 bool GrResourceCache::isPidAbnormal() const
758 {
759 return fExitedPid_.find(getCurrentGrResourceTag().fPid) != fExitedPid_.end();
760 }
761
// OH ISSUE: change the fbyte when the resource tag changes.
// Moves `bytes` of accounting from `beforePid` to `afterPid`; each side is
// applied only when that pid is non-zero and its allocation was "real".
void GrResourceCache::changeByteOfPid(int32_t beforePid, int32_t afterPid,
    size_t bytes, bool beforeRealAlloc, bool afterRealAlloc)
{
    if (beforePid && beforeRealAlloc) {
        auto& pidSize = fBytesOfPid[beforePid];
        pidSize -= bytes;
        fUpdatedBytesOfPid[beforePid] = pidSize;
        // Drop the entry once its total reaches zero.
        if (pidSize == 0) {
            fBytesOfPid.erase(beforePid);
        }
    }
    if (afterPid && afterRealAlloc) {
        auto& size = fBytesOfPid[afterPid];
        size += bytes;
        fUpdatedBytesOfPid[afterPid] = size;
    }
}
780
// Refs `resource` on behalf of the cache. A resource that already holds refs
// gets a plain ref; an unreffed one additionally becomes most-recently-used
// (and is pulled out of the purgeable/scratch state as needed).
void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}
791
792 class GrResourceCache::AvailableForScratchUse {
793 public:
AvailableForScratchUse()794 AvailableForScratchUse() { }
795
operator ()(const GrGpuResource * resource) const796 bool operator()(const GrGpuResource* resource) const {
797 // Everything that is in the scratch map should be usable as a
798 // scratch resource.
799 return true;
800 }
801 };
802
// Looks up a scratch resource by key; on a hit, removes it from the scratch
// map, refs it and makes it most-recently-used. Returns nullptr on a miss.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        fScratchMap.remove(scratchKey, resource);
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
814
// Called before a resource's scratch key is removed; drops its scratch-map
// entry if it is currently registered there.
void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}
822
// Strips `resource`'s unique key and, if that makes it scratch-usable,
// registers it in the scratch map.
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->cacheAccess().isUsableAsScratch()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
842
// Installs |newKey| as |resource|'s unique key, first evicting or stripping
// whichever resource currently owns that key. Passing an invalid |newKey|
// simply removes the resource's existing unique key.
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap. The isUsableAsScratch call depends on us not adding the new
            // unique key until after this check.
            if (resource->cacheAccess().isUsableAsScratch()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
884
// Adds a cache ref to |resource| and stamps it with a fresh timestamp so it
// becomes the most-recently-used entry. A purgeable resource transitions back
// into the non-purgeable array.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));
    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // It was counted as "a flush would make this purgeable"; gaining a ref
        // means that is no longer true.
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
904
// Called when |resource| drops its last ref of kind |removedRef|. May
// re-register the resource as scratch, move it onto the purgeable queue, keep
// it cached, re-budget it, or free it outright depending on its keys,
// budgeted type, and the cache budget.
void GrResourceCache::notifyARefCntReachedZero(GrGpuResource* resource,
                                               GrGpuResource::LastRemovedRef removedRef) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

    if (removedRef == GrGpuResource::LastRemovedRef::kMainRef) {
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
    }

    // Still held by a command buffer (or similar); nothing more to do yet.
    if (resource->cacheAccess().hasRefOrCommandBufferUsage()) {
        this->validate();
        return;
    }

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // Not purgeable yet, but a flush would make it so.
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    // Newly purgeable: move it from the array onto the purgeable queue.
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
986
// Called after |resource| switched budgeted type. Adjusts the budgeted
// count/byte totals, the flush-will-make-purgeable counter, and the scratch
// map registration to match the new budgeted state.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // The resource just became budgeted: count it toward the budget.
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (resource->cacheAccess().isUsableAsScratch()) {
            fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
        }
        // Gaining a budgeted resource may push us over budget.
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
#ifdef SKIA_OHOS
        GrPerfMonitorReporter::GetInstance().recordTextureCache(resource->getResourceTag().fName);
#endif
        // The resource left the budget: back out its contribution.
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() &&
            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        if (!resource->cacheAccess().hasRef() && !resource->getUniqueKey().isValid() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
1035
1036 static constexpr int timeUnit = 1000;
1037
1038 // OH ISSUE: allow access to release interface
// OH ISSUE: allow access to release interface
// Gatekeeper deciding whether purging may continue right now. Returns false
// to pause purging when (a) we are idle but the next frame has already
// arrived, or (b) we are inside a frame and purging has already run for at
// least fOvertimeDuration (compared in microseconds here — presumably
// fOvertimeDuration uses the same unit; verify against its setter).
bool GrResourceCache::allowToPurge(const std::function<bool(void)>& nextFrameHasArrived)
{
    if (!fEnabled) {
        return true;
    }
    if (fFrameInfo.duringFrame == 0) {
        // Not inside a frame: purge freely unless a new frame is pending.
        if (nextFrameHasArrived && nextFrameHasArrived()) {
            return false;
        }
        return true;
    }
    if (fFrameInfo.frameCount != fLastFrameCount) { // the next frame arrives
        struct timespec startTime = {0, 0};
        if (clock_gettime(CLOCK_REALTIME, &startTime) == -1) {
            // Clock failure: fail open and keep purging.
            return true;
        }
        // Record the start of this frame's purge window, in microseconds.
        fStartTime = startTime.tv_sec * timeUnit * timeUnit + startTime.tv_nsec / timeUnit;
        fLastFrameCount = fFrameInfo.frameCount;
        return true;
    }
    struct timespec endTime = {0, 0};
    if (clock_gettime(CLOCK_REALTIME, &endTime) == -1) {
        return true;
    }
    // Stop purging once the elapsed time within this frame exceeds the allowance.
    if (((endTime.tv_sec * timeUnit * timeUnit + endTime.tv_nsec / timeUnit) - fStartTime) >= fOvertimeDuration) {
        return false;
    }
    return true;
}
1068
purgeAsNeeded(const std::function<bool (void)> & nextFrameHasArrived)1069 void GrResourceCache::purgeAsNeeded(const std::function<bool(void)>& nextFrameHasArrived) {
1070 SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
1071 fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
1072 if (invalidKeyMsgs.count()) {
1073 SkASSERT(fProxyProvider);
1074
1075 for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
1076 if (invalidKeyMsgs[i].inThreadSafeCache()) {
1077 fThreadSafeCache->remove(invalidKeyMsgs[i].key());
1078 SkASSERT(!fThreadSafeCache->has(invalidKeyMsgs[i].key()));
1079 } else {
1080 fProxyProvider->processInvalidUniqueKey(
1081 invalidKeyMsgs[i].key(), nullptr,
1082 GrProxyProvider::InvalidateGPUResource::kYes);
1083 SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
1084 }
1085 }
1086 }
1087
1088 this->processFreedGpuResources();
1089
1090 bool stillOverbudget = this->overBudget(nextFrameHasArrived);
1091 while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1092 GrGpuResource* resource = fPurgeableQueue.peek();
1093 SkASSERT(resource->resourcePriv().isPurgeable());
1094 resource->cacheAccess().release();
1095 stillOverbudget = this->overBudget(nextFrameHasArrived);
1096 }
1097
1098 if (stillOverbudget) {
1099 fThreadSafeCache->dropUniqueRefs(this);
1100
1101 stillOverbudget = this->overBudget(nextFrameHasArrived);
1102 while (stillOverbudget && fPurgeableQueue.count() && this->allowToPurge(nextFrameHasArrived)) {
1103 GrGpuResource* resource = fPurgeableQueue.peek();
1104 SkASSERT(resource->resourcePriv().isPurgeable());
1105 resource->cacheAccess().release();
1106 stillOverbudget = this->overBudget(nextFrameHasArrived);
1107 }
1108 }
1109
1110 this->validate();
1111 }
1112
// Purges purgeable resources. With |scratchResourcesOnly| false every
// purgeable resource older than |purgeTime| (or all of them when |purgeTime|
// is null) is released; with it true only resources lacking a unique key are
// released, collected first so the sorted queue is not disturbed mid-scan.
void GrResourceCache::purgeUnlockedResources(const GrStdSteadyClock::time_point* purgeTime,
                                             bool scratchResourcesOnly) {
#if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
    SimpleCacheInfo simpleCacheInfo;
    traceBeforePurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
#endif
    if (!scratchResourcesOnly) {
        if (purgeTime) {
            fThreadSafeCache->dropUniqueRefsOlderThan(*purgeTime);
        } else {
            fThreadSafeCache->dropUniqueRefs(nullptr);
        }

        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // Resources were given both LRU timestamps and tagged with a frame number when
                // they first became purgeable. The LRU timestamp won't change again until the
                // resource is made non-purgeable again. So, at this point all the remaining
                // resources in the timestamp-sorted queue will have a frame number >= to this
                // one.
                break;
            }

            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Early out if the very first item is too new to purge to avoid sorting the queue when
        // nothing will be deleted.
        if (purgeTime && fPurgeableQueue.count() &&
            fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable() >= *purgeTime) {
#if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
            traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
#endif
            return;
        }

        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            const GrStdSteadyClock::time_point resourceTime =
                    resource->cacheAccess().timeWhenResourceBecamePurgeable();
            if (purgeTime && resourceTime >= *purgeTime) {
                // scratch or not, all later iterations will be too recently used to purge.
                break;
            }
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
#if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
    traceAfterPurgeUnlockRes("purgeUnlockedResources", simpleCacheInfo);
#endif
}
1186
purgeUnlockAndSafeCacheGpuResources()1187 void GrResourceCache::purgeUnlockAndSafeCacheGpuResources() {
1188 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1189 SimpleCacheInfo simpleCacheInfo;
1190 traceBeforePurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1191 #endif
1192 fThreadSafeCache->dropUniqueRefs(nullptr);
1193 // Sort the queue
1194 fPurgeableQueue.sort();
1195
1196 //Make a list of the scratch resources to delete
1197 SkTDArray<GrGpuResource*> scratchResources;
1198 for (int i = 0; i < fPurgeableQueue.count(); i++) {
1199 GrGpuResource* resource = fPurgeableQueue.at(i);
1200 if (!resource) {
1201 continue;
1202 }
1203 SkASSERT(resource->resourcePriv().isPurgeable());
1204 if (!resource->getUniqueKey().isValid()) {
1205 *scratchResources.append() = resource;
1206 }
1207 }
1208
1209 //Delete the scatch resource. This must be done as a separate pass
1210 //to avoid messing up the sorted order of the queue
1211 for (int i = 0; i <scratchResources.count(); i++) {
1212 scratchResources.getAt(i)->cacheAccess().release();
1213 }
1214
1215 this->validate();
1216 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1217 traceAfterPurgeUnlockRes("purgeUnlockAndSafeCacheGpuResources", simpleCacheInfo);
1218 #endif
1219 }
1220
1221 // OH ISSUE: suppress release window
suppressGpuCacheBelowCertainRatio(const std::function<bool (void)> & nextFrameHasArrived)1222 void GrResourceCache::suppressGpuCacheBelowCertainRatio(const std::function<bool(void)>& nextFrameHasArrived) {
1223 if (!fEnabled) {
1224 return;
1225 }
1226 this->purgeAsNeeded(nextFrameHasArrived);
1227 }
1228
// OH ISSUE: between-frames purge. Releases at most one purgeable resource per
// call: preferentially one owned by an exited pid, otherwise — when budgeted
// bytes exceed the soft limit — the LRU resource not owned by a protected pid
// (restricted to scratch resources when |scratchResourcesOnly| is set).
void GrResourceCache::purgeCacheBetweenFrames(bool scratchResourcesOnly, const std::set<int>& exitedPidSet,
                                              const std::set<int>& protectedPidSet) {
    // NOTE(review): fBudgetedBytes/fMaxBytes are size_t but the format uses
    // %d — confirm against the trace macro's expectations (%zu may be needed).
    HITRACE_OHOS_NAME_FMT_ALWAYS("PurgeGrResourceCache cur=%d, limit=%d", fBudgetedBytes, fMaxBytes);
    if (exitedPidSet.size() > 1) {
        // NOTE(review): the scan starts at index 1 and only runs when more
        // than one pid exited, so queue slot 0 is never examined here —
        // verify this is intentional.
        for (int i = 1; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (exitedPidSet.find(resource->getResourceTag().fPid) != exitedPidSet.end()) {
                // Release one resource per call, then stop.
                resource->cacheAccess().release();
                this->validate();
                return;
            }
        }
    }
    fPurgeableQueue.sort();
    const char* softLimitPercentage = "0.9";
#ifdef NOT_BUILD_FOR_OHOS_SDK
    // Soft limit is a system-parameter-configurable fraction of the budget,
    // computed once (static) on first use.
    static int softLimit =
        std::atof(OHOS::system::GetParameter("persist.sys.graphic.mem.soft_limit",
            softLimitPercentage).c_str()) * fMaxBytes;
#else
    static int softLimit = 0.9 * fMaxBytes;
#endif
    if (fBudgetedBytes >= softLimit) {
        for (int i=0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (protectedPidSet.find(resource->getResourceTag().fPid) == protectedPidSet.end() &&
                (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
                // Release one resource per call, then stop.
                resource->cacheAccess().release();
                this->validate();
                return;
            }
        }
    }
}
1265
purgeUnlockedResourcesByPid(bool scratchResourceOnly,const std::set<int> & exitedPidSet)1266 void GrResourceCache::purgeUnlockedResourcesByPid(bool scratchResourceOnly, const std::set<int>& exitedPidSet) {
1267 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1268 SimpleCacheInfo simpleCacheInfo;
1269 traceBeforePurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1270 #endif
1271 // Sort the queue
1272 fPurgeableQueue.sort();
1273
1274 //Make lists of the need purged resources to delete
1275 fThreadSafeCache->dropUniqueRefs(nullptr);
1276 SkTDArray<GrGpuResource*> exitPidResources;
1277 SkTDArray<GrGpuResource*> scratchResources;
1278 for (int i = 0; i < fPurgeableQueue.count(); i++) {
1279 GrGpuResource* resource = fPurgeableQueue.at(i);
1280 if (!resource) {
1281 continue;
1282 }
1283 SkASSERT(resource->resourcePriv().isPurgeable());
1284 if (exitedPidSet.count(resource->getResourceTag().fPid)) {
1285 *exitPidResources.append() = resource;
1286 } else if (!resource->getUniqueKey().isValid()) {
1287 *scratchResources.append() = resource;
1288 }
1289 }
1290
1291 //Delete the exited pid and scatch resource. This must be done as a separate pass
1292 //to avoid messing up the sorted order of the queue
1293 for (int i = 0; i <exitPidResources.count(); i++) {
1294 exitPidResources.getAt(i)->cacheAccess().release();
1295 }
1296 for (int i = 0; i <scratchResources.count(); i++) {
1297 scratchResources.getAt(i)->cacheAccess().release();
1298 }
1299
1300 for (auto pid : exitedPidSet) {
1301 fExitedPid_.erase(pid);
1302 }
1303
1304 this->validate();
1305 #if defined (SKIA_OHOS) && defined (SKIA_DFX_FOR_OHOS)
1306 traceAfterPurgeUnlockRes("purgeUnlockedResourcesByPid", simpleCacheInfo);
1307 #endif
1308 }
1309
purgeUnlockedResourcesByTag(bool scratchResourcesOnly,const GrGpuResourceTag & tag)1310 void GrResourceCache::purgeUnlockedResourcesByTag(bool scratchResourcesOnly, const GrGpuResourceTag& tag) {
1311 // Sort the queue
1312 fPurgeableQueue.sort();
1313
1314 //Make a list of the scratch resources to delete
1315 SkTDArray<GrGpuResource*> scratchResources;
1316 for (int i = 0; i < fPurgeableQueue.count(); i++) {
1317 GrGpuResource* resource = fPurgeableQueue.at(i);
1318 SkASSERT(resource->resourcePriv().isPurgeable());
1319 if (tag.filter(resource->getResourceTag()) &&
1320 (!scratchResourcesOnly || !resource->getUniqueKey().isValid())) {
1321 *scratchResources.append() = resource;
1322 }
1323 }
1324
1325 //Delete the scatch resource. This must be done as a separate pass
1326 //to avoid messing up the sorted order of the queue
1327 for (int i = 0; i <scratchResources.count(); i++) {
1328 scratchResources.getAt(i)->cacheAccess().release();
1329 }
1330
1331 this->validate();
1332 }
1333
// Tries to make |desiredHeadroomBytes| of budget headroom available by
// releasing purgeable resources in LRU order. Returns true when the headroom
// exists or was created; false when it cannot be achieved.
bool GrResourceCache::purgeToMakeHeadroom(size_t desiredHeadroomBytes) {
    AutoValidate av(this);
    if (desiredHeadroomBytes > fMaxBytes) {
        return false;
    }
    if (this->wouldFit(desiredHeadroomBytes)) {
        return true;
    }
    fPurgeableQueue.sort();

    // Walk the LRU-sorted queue, simulating releases of budgeted resources
    // until the projected budget leaves enough headroom.
    size_t projectedBudget = fBudgetedBytes;
    int purgeCnt = 0;
    for (int i = 0; i < fPurgeableQueue.count(); i++) {
        GrGpuResource* resource = fPurgeableQueue.at(i);
        if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
            projectedBudget -= resource->gpuMemorySize();
        }
        if (projectedBudget + desiredHeadroomBytes <= fMaxBytes) {
            purgeCnt = i + 1;
            break;
        }
    }
    // purgeCnt stays 0 when even releasing everything purgeable wouldn't help.
    if (purgeCnt == 0) {
        return false;
    }

    // Success! Release the resources.
    // Copy to array first so we don't mess with the queue.
    std::vector<GrGpuResource*> resources;
    resources.reserve(purgeCnt);
    for (int i = 0; i < purgeCnt; i++) {
        resources.push_back(fPurgeableQueue.at(i));
    }
    for (GrGpuResource* resource : resources) {
        resource->cacheAccess().release();
    }
    return true;
}
1372
purgeUnlockedResources(size_t bytesToPurge,bool preferScratchResources)1373 void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
1374
1375 const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
1376 bool stillOverbudget = tmpByteBudget < fBytes;
1377
1378 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
1379 // Sort the queue
1380 fPurgeableQueue.sort();
1381
1382 // Make a list of the scratch resources to delete
1383 SkTDArray<GrGpuResource*> scratchResources;
1384 size_t scratchByteCount = 0;
1385 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
1386 GrGpuResource* resource = fPurgeableQueue.at(i);
1387 SkASSERT(resource->resourcePriv().isPurgeable());
1388 if (!resource->getUniqueKey().isValid()) {
1389 *scratchResources.append() = resource;
1390 scratchByteCount += resource->gpuMemorySize();
1391 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
1392 }
1393 }
1394
1395 // Delete the scratch resources. This must be done as a separate pass
1396 // to avoid messing up the sorted order of the queue
1397 for (int i = 0; i < scratchResources.count(); i++) {
1398 scratchResources.getAt(i)->cacheAccess().release();
1399 }
1400 stillOverbudget = tmpByteBudget < fBytes;
1401
1402 this->validate();
1403 }
1404
1405 // Purge any remaining resources in LRU order
1406 if (stillOverbudget) {
1407 const size_t cachedByteCount = fMaxBytes;
1408 fMaxBytes = tmpByteBudget;
1409 this->purgeAsNeeded();
1410 fMaxBytes = cachedByteCount;
1411 }
1412 }
1413
requestsFlush() const1414 bool GrResourceCache::requestsFlush() const {
1415 return this->overBudget() && !fPurgeableQueue.count() &&
1416 fNumBudgetedResourcesFlushWillMakePurgeable > 0;
1417 }
1418
insertDelayedTextureUnref(GrTexture * texture)1419 void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
1420 texture->ref();
1421 uint32_t id = texture->uniqueID().asUInt();
1422 if (auto* data = fTexturesAwaitingUnref.find(id)) {
1423 data->addRef();
1424 } else {
1425 fTexturesAwaitingUnref.set(id, {texture});
1426 }
1427 }
1428
// Drains the freed-texture message inbox and releases the delayed unrefs that
// insertDelayedTextureUnref() registered for each freed texture, dropping the
// bookkeeping entry once its ref count is exhausted.
void GrResourceCache::processFreedGpuResources() {
    // No delayed unrefs registered: nothing to do, skip polling the inbox.
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fIntendedRecipient == fOwningContextID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}
1450
addToNonpurgeableArray(GrGpuResource * resource)1451 void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
1452 int index = fNonpurgeableResources.count();
1453 *fNonpurgeableResources.append() = resource;
1454 *resource->cacheAccess().accessCacheIndex() = index;
1455 }
1456
// Removes |resource| from the non-purgeable array in O(1) via swap-with-last,
// keeping the per-resource cache indices consistent.
void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    // Poison the removed resource's index in debug builds.
    SkDEBUGCODE(*index = -1);
}
1468
// Returns the next monotonically increasing LRU timestamp. When the 32-bit
// counter wraps to 0, every live resource is re-stamped with compact
// sequential timestamps (preserving relative LRU order) before continuing.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Draining the queue in heap order yields the purgeable resources
            // sorted by timestamp.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
1530
1531 #ifdef SKIA_DFX_FOR_RECORD_VKIMAGE
dumpAllResource(std::stringstream & dump) const1532 void GrResourceCache::dumpAllResource(std::stringstream &dump) const {
1533 if (getResourceCount() == 0) {
1534 return;
1535 }
1536 dump << "Purgeable: " << fPurgeableQueue.count() << std::endl;
1537 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1538 GrGpuResource* resource = fPurgeableQueue.at(i);
1539 if (resource == nullptr) {
1540 continue;
1541 }
1542 if (strcmp(resource->getResourceType(), "VkImage") != 0) {
1543 continue;
1544 }
1545 dump << i << " " << resource->getResourceType() << " ";
1546 resource->dumpVkImageInfo(dump);
1547 }
1548 dump << "Non-Purgeable: " << fNonpurgeableResources.count() << std::endl;
1549 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1550 GrGpuResource* resource = fNonpurgeableResources[i];
1551 if (resource == nullptr) {
1552 continue;
1553 }
1554 if (strcmp(resource->getResourceType(), "VkImage") != 0) {
1555 continue;
1556 }
1557 dump << i << " " << resource->getResourceType() << " ";
1558 resource->dumpVkImageInfo(dump);
1559 }
1560 #ifdef SK_VULKAN
1561 dump << "Destroy Record: " << std::endl;
1562 ParallelDebug::DumpAllDestroyVkImage(dump);
1563 #endif
1564 }
1565 #endif
1566
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump) const1567 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
1568 SkTDArray<GrGpuResource*> resources;
1569 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1570 *resources.append() = fNonpurgeableResources[i];
1571 }
1572 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1573 *resources.append() = fPurgeableQueue.at(i);
1574 }
1575 for (int i = 0; i < resources.count(); i++) {
1576 auto resource = resources.getAt(i);
1577 if (!resource || resource->wasDestroyed()) {
1578 continue;
1579 }
1580 resource->dumpMemoryStatistics(traceMemoryDump);
1581 }
1582 }
1583
dumpMemoryStatistics(SkTraceMemoryDump * traceMemoryDump,const GrGpuResourceTag & tag) const1584 void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump, const GrGpuResourceTag& tag) const {
1585 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1586 if (tag.filter(fNonpurgeableResources[i]->getResourceTag())) {
1587 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
1588 }
1589 }
1590 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1591 if (tag.filter(fPurgeableQueue.at(i)->getResourceTag())) {
1592 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
1593 }
1594 }
1595 }
1596
1597 #if GR_CACHE_STATS
getStats(Stats * stats) const1598 void GrResourceCache::getStats(Stats* stats) const {
1599 stats->reset();
1600
1601 stats->fTotal = this->getResourceCount();
1602 stats->fNumNonPurgeable = fNonpurgeableResources.count();
1603 stats->fNumPurgeable = fPurgeableQueue.count();
1604
1605 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
1606 stats->update(fNonpurgeableResources[i]);
1607 }
1608 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
1609 stats->update(fPurgeableQueue.at(i));
1610 }
1611 }
1612
1613 #if GR_TEST_UTILS
// Appends a human-readable summary of entry counts and byte usage to |out|
// (test-only; compiled under GR_CACHE_STATS && GR_TEST_UTILS).
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    // Percentage of the byte budget currently used by budgeted resources.
    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}
1632
dumpStatsKeyValuePairs(SkTArray<SkString> * keys,SkTArray<double> * values) const1633 void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
1634 SkTArray<double>* values) const {
1635 this->validate();
1636
1637 Stats stats;
1638 this->getStats(&stats);
1639
1640 keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
1641 }
1642 #endif // GR_TEST_UTILS
1643 #endif // GR_CACHE_STATS
1644
1645 #ifdef SK_DEBUG
// Debug-only consistency check: walks every resource in the cache and asserts
// that the cached aggregate counters (fCount, fBytes, fBudgetedBytes, ...)
// and the auxiliary indices (fScratchMap, fUniqueHash, per-resource cache
// indices) all agree with a freshly recomputed view. No-op on most calls for
// large caches (see sampling below).
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    // The mask grows with fCount, so a random draw skips most invocations
    // once the cache is big; small caches validate every time.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Locally recomputed totals, to be compared against the cache's
    // incrementally-maintained member counters at the end.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            // Zero all counters in one shot; the two pointers are set right after.
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        // Accumulates one resource and asserts its key/budget/ref-state
        // invariants.
        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isUsableAsScratch()) {
                // A reusable scratch resource must be budgeted, unreferenced,
                // have no unique key, and be present in the scratch map.
                SkASSERT(!uniqueKey.isValid());
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType());
                SkASSERT(!resource->cacheAccess().hasRef());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                // Has a scratch key but is not currently usable as scratch:
                // it must be non-budgeted, uniquely keyed, or still referenced
                // — and it must NOT appear in the scratch map.
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid() || resource->cacheAccess().hasRef());
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
                SkASSERT(!fScratchMap->has(resource, scratchKey));
            }
            if (uniqueKey.isValid()) {
                // The unique hash must map the key straight back to this
                // resource; unbudgeted uniquely-keyed resources must wrap
                // external objects.
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Every entry in the scratch map must currently be usable as scratch.
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.cacheAccess().isUsableAsScratch());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    // Non-purgeable list: each entry must know its own index, must not be
    // destroyed, and (except for the one resource transitioning to purgeable
    // right now) must actually be non-purgeable.
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    // Purgeable queue: every entry must be purgeable, alive, and indexed
    // correctly; also tally purgeable bytes for the aggregate check below.
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // Recomputed totals must match the incrementally-maintained counters.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    // High-water marks must dominate their current counterparts.
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
1766
isInCache(const GrGpuResource * resource) const1767 bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
1768 int index = *resource->cacheAccess().accessCacheIndex();
1769 if (index < 0) {
1770 return false;
1771 }
1772 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
1773 return true;
1774 }
1775 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
1776 return true;
1777 }
1778 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
1779 return false;
1780 }
1781
1782 #endif // SK_DEBUG
1783
1784 #if GR_TEST_UTILS
1785
countUniqueKeysWithTag(const char * tag) const1786 int GrResourceCache::countUniqueKeysWithTag(const char* tag) const {
1787 int count = 0;
1788 fUniqueHash.foreach([&](const GrGpuResource& resource){
1789 if (0 == strcmp(tag, resource.getUniqueKey().tag())) {
1790 ++count;
1791 }
1792 });
1793 return count;
1794 }
1795
// Test-only: force the cache's internal timestamp counter to |newTimestamp|,
// e.g. to exercise timestamp-wraparound handling in tests.
void GrResourceCache::changeTimestamp(uint32_t newTimestamp) {
    fTimestamp = newTimestamp;
}
1799
1800 #endif // GR_TEST_UTILS
1801