/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "memory/rs_memory_manager.h"

#include <fstream>
#include <malloc.h>
#include <sstream>
#include <string>
#include "include/core/SkGraphics.h"
#include "rs_trace.h"
#include "third_party/cJSON/cJSON.h"

#include "memory/rs_dfx_string.h"
#include "skia_adapter/rs_skia_memory_tracer.h"
#include "skia_adapter/skia_graphics.h"
#include "memory/rs_memory_graphic.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"

#include "common/rs_background_thread.h"
#include "common/rs_obj_abs_geometry.h"
#include "common/rs_singleton.h"
#include "memory/rs_tag_tracker.h"
#ifdef NEW_RENDER_CONTEXT
#include "render_context/memory_handler.h"
#endif
#include "pipeline/rs_main_thread.h"
#include "pipeline/rs_surface_render_node.h"
#include "platform/common/rs_log.h"
#include "platform/common/rs_system_properties.h"

#include "app_mgr_client.h"
#include "hisysevent.h"
#include "image/gpu_context.h"

#ifdef RS_ENABLE_VK
#include "pipeline/rs_vk_image_manager.h"
#include "platform/ohos/backend/rs_vulkan_context.h"
#endif

namespace OHOS::Rosen {
namespace {
const std::string KERNEL_CONFIG_PATH = "/system/etc/hiview/kernel_leak_config.json";
constexpr uint32_t MEMUNIT_RATE = 1024;
constexpr uint32_t MEMORY_REPORT_INTERVAL = 24 * 60 * 60 * 1000; // Each process can report at most once a day.
constexpr uint32_t FRAME_NUMBER = 10; // Check memory every ten frames.
constexpr const char* MEM_RS_TYPE = "renderservice";
constexpr const char* MEM_CPU_TYPE = "cpu";
constexpr const char* MEM_GPU_TYPE = "gpu";
constexpr const char* MEM_JEMALLOC_TYPE = "jemalloc";
constexpr const char* MEM_SNAPSHOT = "snapshot";
constexpr int DUMP_STRING_BUF_SIZE = 4000;
}

std::mutex MemoryManager::mutex_;
std::unordered_map<pid_t, std::pair<std::string, uint64_t>> MemoryManager::pidInfo_;
uint32_t MemoryManager::frameCount_ = 0;
uint64_t MemoryManager::memoryWarning_ = UINT64_MAX;
uint64_t MemoryManager::totalMemoryReportTime_ = 0;

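// Dumps the memory category selected by "type"; an empty type dumps every category in turn.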
void MemoryManager::DumpMemoryUsage(DfxString& log, std::string& type)
{
    if (type.empty() || type == MEM_RS_TYPE) {
        DumpRenderServiceMemory(log);
    }
    if (type.empty() || type == MEM_CPU_TYPE) {
        DumpDrawingCpuMemory(log);
    }
    if (type.empty() || type == MEM_GPU_TYPE) {
        RSUniRenderThread::Instance().DumpMem(log);
    }
    if (type.empty() || type == MEM_JEMALLOC_TYPE) {
        std::string out;
        DumpMallocStat(out);
        log.AppendFormat("%s\n... detail dump at hilog\n", out.c_str());
    }
    if (type.empty() || type == MEM_SNAPSHOT) {
        DumpMemorySnapshot(log);
    }
}

void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseAllGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->ReleaseByTag(tag);
#endif
}

void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseAllGpuResource");
    ReleaseAllGpuResource(gpuContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->PurgeUnlockedResourcesByTag(false, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, std::set<pid_t> exitedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByPid fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource exitedPidSet size: %d", exitedPidSet.size());
    gpuContext->PurgeUnlockedResourcesByPid(false, exitedPidSet);
    MemorySnapshot::Instance().EraseSnapshotInfoByPid(exitedPidSet);
    ErasePidInfo(exitedPidSet);
#endif
}

void MemoryManager::PurgeCacheBetweenFrames(Drawing::GPUContext* gpuContext, bool scratchResourceOnly,
    std::set<pid_t>& exitedPidSet, std::set<pid_t>& protectedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("PurgeCacheBetweenFrames fail, gpuContext is nullptr");
        return;
    }
    gpuContext->PurgeCacheBetweenFrames(scratchResourceOnly, exitedPidSet, protectedPidSet);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, NodeId surfaceNodeId)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(ExtractPid(surfaceNodeId), 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag); // clear gpu resource by pid
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, bool scratchResourcesOnly)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource scratchResourcesOnly:%d", scratchResourcesOnly);
    gpuContext->PurgeUnlockedResources(scratchResourcesOnly);
#endif
}

void MemoryManager::ReleaseUnlockAndSafeCacheGpuResource(Drawing::GPUContext* gpuContext)
{
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseUnlockAndSafeCacheGpuResource fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockAndSafeCacheGpuResource");
    gpuContext->PurgeUnlockAndSafeCacheGpuResources();
#endif
}

void MemoryManager::SetGpuCacheSuppressWindowSwitch(Drawing::GPUContext* gpuContext, bool enabled)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuCacheSuppressWindowSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuCacheSuppressWindowSwitch(enabled);
#endif
}

void MemoryManager::SetGpuMemoryAsyncReclaimerSwitch(Drawing::GPUContext* gpuContext, bool enabled)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuMemoryAsyncReclaimerSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuMemoryAsyncReclaimerSwitch(enabled);
#endif
}

void MemoryManager::FlushGpuMemoryInWaitQueue(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("FlushGpuMemoryInWaitQueue fail, gpuContext is nullptr");
        return;
    }
    gpuContext->FlushGpuMemoryInWaitQueue();
#endif
}

void MemoryManager::SuppressGpuCacheBelowCertainRatio(
    Drawing::GPUContext* gpuContext, const std::function<bool(void)>& nextFrameHasArrived)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SuppressGpuCacheBelowCertainRatio fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SuppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}

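// Reports the GPU memory (in MB) not covered by RenderService's own tag types, i.e. the
// overall footprint minus everything tagged between TAG_SAVELAYER_DRAW_NODE and TAG_CAPTURE.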
float MemoryManager::GetAppGpuMemoryInMB(Drawing::GPUContext* gpuContext)
{
    if (!gpuContext) {
        return 0.f;
    }
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::TraceMemoryDump trace("category", true);
    gpuContext->DumpMemoryStatistics(&trace);
    auto total = trace.GetGpuMemorySizeInMB();
    float rsMemSize = 0.f;
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        Drawing::GPUResourceTag resourceTag(0, 0, 0, tagtype,
            RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype)));
        Drawing::TraceMemoryDump gpuTrace("category", true);
        gpuContext->DumpMemoryStatisticsByTag(&gpuTrace, resourceTag);
        rsMemSize += gpuTrace.GetGpuMemorySizeInMB();
    }
    return total - rsMemSize;
#else
    return 0.f;
#endif
}

void MemoryManager::DumpPidMemory(DfxString& log, int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic mem = CountPidMemory(pid, gpuContext);
    log.AppendFormat("GPU Mem(MB):%f\n", mem.GetGpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    log.AppendFormat("CPU Mem(MB):%f\n", mem.GetCpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    log.AppendFormat("Total Mem(MB):%f\n", mem.GetTotalMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
}

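// Collects the GPU memory charged to a single pid by dumping statistics for that pid's resource tag.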
MemoryGraphic MemoryManager::CountPidMemory(int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic totalMemGraphic;

    // Count mem of RS
    totalMemGraphic.SetPid(pid);

#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    // Count mem of Skia GPU
    if (gpuContext) {
        Drawing::TraceMemoryDump gpuTracer("category", true);
        Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
        gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, tag);
        float gpuMem = gpuTracer.GetGLMemorySize();
        totalMemGraphic.IncreaseGpuMemory(gpuMem);
    }
#endif

    return totalMemGraphic;
}

void MemoryManager::CountMemory(
    std::vector<pid_t> pids, const Drawing::GPUContext* gpuContext, std::vector<MemoryGraphic>& mems)
{
    auto countMem = [&gpuContext, &mems] (pid_t pid) {
        mems.emplace_back(CountPidMemory(pid, gpuContext));
    };
    // Count mem of Skia GPU
    std::for_each(pids.begin(), pids.end(), countMem);
}

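// Maps a node id to its owning window (the nearest RSSurfaceRenderNode ancestor) and its absolute
// rect; passed to MemoryTrack as the geometry-lookup callback in DumpRenderServiceMemory.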
static std::tuple<uint64_t, std::string, RectI> FindGeoById(uint64_t nodeId)
{
    const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
    auto node = nodeMap.GetRenderNode<RSRenderNode>(nodeId);
    uint64_t windowId = nodeId;
    std::string windowName = "NONE";
    RectI nodeFrameRect;
    if (!node) {
        return { windowId, windowName, nodeFrameRect };
    }
    nodeFrameRect =
        (node->GetRenderProperties().GetBoundsGeometry())->GetAbsRect();
    // Obtain the window according to childId
    auto parent = node->GetParent().lock();
    bool windowsNameFlag = false;
    while (parent) {
        if (parent->IsInstanceOf<RSSurfaceRenderNode>()) {
            const auto& surfaceNode = RSBaseRenderNode::ReinterpretCast<RSSurfaceRenderNode>(parent);
            windowName = surfaceNode->GetName();
            windowId = surfaceNode->GetId();
            windowsNameFlag = true;
            break;
        }
        parent = parent->GetParent().lock();
    }
    if (!windowsNameFlag) {
        windowName = "EXISTS-BUT-NO-SURFACE";
    }
    return { windowId, windowName, nodeFrameRect };
}

void MemoryManager::DumpRenderServiceMemory(DfxString& log)
{
    log.AppendFormat("\n----------\nRenderService caches:\n");
    MemoryTrack::Instance().DumpMemoryStatistics(log, FindGeoById);
}

void MemoryManager::DumpDrawingCpuMemory(DfxString& log)
{
    // CPU
    std::string cpuInfo = "Skia CPU caches : pid:" + std::to_string(getpid()) +
        ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        cpuInfo = cpuInfo + ", threadName: " + threadName;
    }
#endif
    log.AppendFormat("\n----------\n%s\n", cpuInfo.c_str());
    log.AppendFormat("Font Cache (CPU):\n");
    log.AppendFormat("  Size: %.2f kB \n", Drawing::SkiaGraphics::GetFontCacheUsed() / MEMUNIT_RATE);
    log.AppendFormat("  Glyph Count: %d \n", Drawing::SkiaGraphics::GetFontCacheCountUsed());

    std::vector<ResourcePair> cpuResourceMap = {
        { "skia/sk_resource_cache/bitmap_", "Bitmaps" },
        { "skia/sk_resource_cache/rrect-blur_", "Masks" },
        { "skia/sk_resource_cache/rects-blur_", "Masks" },
        { "skia/sk_resource_cache/tessellated", "Shadows" },
        { "skia/sk_resource_cache/yuv-planes_", "YUVPlanes" },
        { "skia/sk_resource_cache/budget_glyph_count", "Bitmaps" },
    };
    SkiaMemoryTracer cpuTracer(cpuResourceMap, true);
    Drawing::SkiaGraphics::DumpMemoryStatistics(&cpuTracer);
    log.AppendFormat("CPU Caches:\n");
    cpuTracer.LogOutput(log);
    log.AppendFormat("Total CPU memory usage:\n");
    cpuTracer.LogTotals(log);

    // cache limit
    size_t cacheLimit = Drawing::SkiaGraphics::GetResourceCacheTotalByteLimit();
    size_t fontCacheLimit = Drawing::SkiaGraphics::GetFontCacheLimit();
    log.AppendFormat("\ncpu cache limit = %zu ( fontcache = %zu ):\n", cacheLimit, fontCacheLimit);
}

void MemoryManager::DumpGpuCache(
    DfxString& log, const Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag* tag, std::string& name)
{
    if (!gpuContext) {
        log.AppendFormat("gpuContext is nullptr.\n");
        return;
    }
    /* GPU */
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    log.AppendFormat("\n---------------\nSkia GPU Caches:%s\n", name.c_str());
    Drawing::TraceMemoryDump gpuTracer("category", true);
    if (tag) {
        gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, *tag);
    } else {
        gpuContext->DumpMemoryStatistics(&gpuTracer);
#ifdef RS_ENABLE_VK
        RsVulkanMemStat& memStat = RsVulkanContext::GetSingleton().GetRsVkMemStat();
        memStat.DumpMemoryStatistics(&gpuTracer);
#endif
    }
    gpuTracer.LogOutput(log);
    log.AppendFormat("Total GPU memory usage:\n");
    gpuTracer.LogTotals(log);
#endif
}

void MemoryManager::DumpAllGpuInfo(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    for (auto& nodeTag : nodeTags) {
        Drawing::GPUResourceTag tag(ExtractPid(nodeTag.first), 0, nodeTag.first, 0, nodeTag.second);
        DumpGpuCache(log, gpuContext, &tag, nodeTag.second);
    }
#endif
}

void MemoryManager::DumpDrawingGpuMemory(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
    /* GPU */
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    std::string gpuInfo = "pid:" + std::to_string(getpid()) + ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        gpuInfo = gpuInfo + ", threadName: " + threadName;
    }
#endif
    // total
    DumpGpuCache(log, gpuContext, nullptr, gpuInfo);
    // Get memory of window by tag
    DumpAllGpuInfo(log, gpuContext, nodeTags);
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        std::string tagTypeName = RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype));
        Drawing::GPUResourceTag tag(0, 0, 0, tagtype, tagTypeName);
        DumpGpuCache(log, gpuContext, &tag, tagTypeName);
    }
    // cache limit
    size_t cacheLimit = 0;
    size_t cacheUsed = 0;
    gpuContext->GetResourceCacheLimits(nullptr, &cacheLimit);
    gpuContext->GetResourceCacheUsage(nullptr, &cacheUsed);
    log.AppendFormat("\ngpu limit = %zu ( used = %zu ):\n", cacheLimit, cacheUsed);

    /* ShaderCache */
    log.AppendFormat("\n---------------\nShader Caches:\n");
#ifdef NEW_RENDER_CONTEXT
    log.AppendFormat(MemoryHandler::QuerryShader().c_str());
#else
    std::shared_ptr<RenderContext> rendercontext = std::make_shared<RenderContext>();
    log.AppendFormat(rendercontext->GetShaderCacheSize().c_str());
#endif
    // gpu stat
    DumpGpuStats(log, gpuContext);
#endif
}

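// Dumps the GPU driver statistics, splitting the output into DUMP_STRING_BUF_SIZE-sized chunks
// before appending so a long stats string is not passed to AppendFormat in one piece.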
void MemoryManager::DumpGpuStats(DfxString& log, const Drawing::GPUContext* gpuContext)
{
    log.AppendFormat("\n---------------\ndumpGpuStats:\n");
    std::string stat;
    gpuContext->DumpGpuStats(stat);

    int statIndex = 0;
    int statLength = stat.length();
    while (statIndex < statLength) {
        std::string statSubStr;
        if (statLength - statIndex > DUMP_STRING_BUF_SIZE) {
            statSubStr = stat.substr(statIndex, DUMP_STRING_BUF_SIZE);
            statIndex += DUMP_STRING_BUF_SIZE;
        } else {
            statSubStr = stat.substr(statIndex, statLength - statIndex);
            statIndex = statLength;
        }
        log.AppendFormat("%s", statSubStr.c_str());
    }
    log.AppendFormat("\ndumpGpuStats end\n---------------\n");
}

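// Prints jemalloc statistics: the "Allocated" summary line is appended to the output string,
// while the full malloc_stats_print output goes to hilog.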
void MemoryManager::DumpMallocStat(std::string& log)
{
    malloc_stats_print(
        [](void* fp, const char* str) {
            if (!fp) {
                RS_LOGE("DumpMallocStat fp is nullptr");
                return;
            }
            std::string* sp = static_cast<std::string*>(fp);
            if (str) {
                // The log only supports 2096 bytes, so append only the critical "Allocated"
                // summary line here and put the full statistics in RS_LOG.
                if (strncmp(str, "Allocated", strlen("Allocated")) == 0) {
                    sp->append(str);
                }
                RS_LOGW("[mallocstat]:%{public}s", str);
            }
        },
        &log, nullptr);
}

void MemoryManager::DumpMemorySnapshot(DfxString& log)
{
    log.AppendFormat("\n---------------\nmemorySnapshots:\n");
    std::unordered_map<pid_t, MemorySnapshotInfo> memorySnapshotInfo;
    MemorySnapshot::Instance().GetMemorySnapshot(memorySnapshotInfo);
    for (auto& [pid, snapshotInfo] : memorySnapshotInfo) {
        std::string infoStr = "pid: " + std::to_string(pid) +
            ", cpu: " + std::to_string(snapshotInfo.cpuMemory) +
            ", gpu: " + std::to_string(snapshotInfo.gpuMemory);
        log.AppendFormat("%s\n", infoStr.c_str());
    }
}

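// Reads a numeric threshold (in MB) from the JSON node and converts it to bytes;
// returns UINT64_MAX when the field is missing or not a number.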
uint64_t ParseMemoryLimit(const cJSON* json, const char* name)
{
    cJSON* jsonItem = cJSON_GetObjectItem(json, name);
    if (jsonItem != nullptr && cJSON_IsNumber(jsonItem)) {
        return static_cast<uint64_t>(jsonItem->valueint) * MEMUNIT_RATE * MEMUNIT_RATE;
    }
    return UINT64_MAX;
}

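// Loads per-process and total memory thresholds from the kernel leak config and
// registers MemoryOverflow as the callback for threshold violations.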
void MemoryManager::InitMemoryLimit(Drawing::GPUContext* gpuContext)
{
    std::ifstream configFile;
    configFile.open(KERNEL_CONFIG_PATH);
    std::stringstream filterParamsStream;
    filterParamsStream << configFile.rdbuf();
    configFile.close();
    std::string paramsString = filterParamsStream.str();

    cJSON* root = cJSON_Parse(paramsString.c_str());
    if (root == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not parse config to json");
        return;
    }
    cJSON* kernelLeak = cJSON_GetObjectItem(root, "KernelLeak");
    if (kernelLeak == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find kernelLeak");
        cJSON_Delete(root);
        return;
    }
    cJSON* version = cJSON_GetObjectItem(kernelLeak, RSSystemProperties::GetVersionType().c_str());
    if (version == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find version");
        cJSON_Delete(root);
        return;
    }
    cJSON* rsWatchPoint = cJSON_GetObjectItem(version, "rs_watchpoint");
    if (rsWatchPoint == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        cJSON_Delete(root);
        return;
    }
    // warning threshold for total memory of a single process
    memoryWarning_ = ParseMemoryLimit(rsWatchPoint, "process_warning_threshold");
    // error threshold for cpu memory of a single process
    uint64_t cpuMemoryControl = ParseMemoryLimit(rsWatchPoint, "process_cpu_control_threshold");
    // error threshold for gpu memory of a single process
    uint64_t gpuMemoryControl = ParseMemoryLimit(rsWatchPoint, "process_gpu_control_threshold");
    // threshold for the total memory of all processes in renderservice
    uint64_t totalMemoryWarning = ParseMemoryLimit(rsWatchPoint, "total_threshold");
    cJSON_Delete(root);

    if (gpuContext != nullptr) {
        gpuContext->InitGpuMemoryLimit(MemoryOverflow, gpuMemoryControl);
    }
    MemorySnapshot::Instance().InitMemoryLimit(MemoryOverflow, memoryWarning_, cpuMemoryControl, totalMemoryWarning);
}

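// Runs once every FRAME_NUMBER frames: refreshes per-pid GPU usage on the background thread
// and reports processes above the warning threshold, at most once per MEMORY_REPORT_INTERVAL.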
void MemoryManager::MemoryOverCheck(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    frameCount_++;
    if (!gpuContext || frameCount_ < FRAME_NUMBER) {
        return;
    }
    frameCount_ = 0;
    std::unordered_map<pid_t, size_t> gpuMemory;
    gpuContext->GetUpdatedMemoryMap(gpuMemory);

    auto task = [gpuMemory = std::move(gpuMemory)]() {
        std::unordered_map<pid_t, MemorySnapshotInfo> infoMap;
        bool isTotalOver = false;
        MemorySnapshot::Instance().UpdateGpuMemoryInfo(gpuMemory, infoMap, isTotalOver);
        auto now = std::chrono::steady_clock::now().time_since_epoch();
        uint64_t currentTime = std::chrono::duration_cast<std::chrono::milliseconds>(now).count();
        // total memory overflow of all processes in renderservice
        if (isTotalOver && currentTime > totalMemoryReportTime_) {
            TotalMemoryOverReport(infoMap);
            totalMemoryReportTime_ = currentTime + MEMORY_REPORT_INTERVAL;
        }

        std::string bundleName;
        bool needReport = false;
        for (const auto& [pid, memoryInfo] : infoMap) {
            if (memoryInfo.TotalMemory() <= memoryWarning_) {
                continue;
            }
            needReport = false;
            {
                std::lock_guard<std::mutex> lock(mutex_);
                auto it = pidInfo_.find(pid);
                if (it == pidInfo_.end()) {
                    int32_t uid;
                    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
                    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
                    pidInfo_.emplace(pid, std::make_pair(bundleName, currentTime + MEMORY_REPORT_INTERVAL));
                    needReport = true;
                } else if (currentTime > it->second.second) {
                    it->second.second = currentTime + MEMORY_REPORT_INTERVAL;
                    bundleName = it->second.first;
                    needReport = true;
                }
            }
            if (needReport) {
                MemoryOverReport(pid, memoryInfo, bundleName, "RENDER_MEMORY_OVER_WARNING");
            }
        }
    };
    RSBackgroundThread::Instance().PostTask(task);
#endif
}

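// Invoked when a process exceeds its cpu/gpu control threshold; reports a
// RENDER_MEMORY_OVER_ERROR event for the offending process.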
void MemoryManager::MemoryOverflow(pid_t pid, size_t overflowMemory, bool isGpu)
{
    MemorySnapshotInfo info;
    MemorySnapshot::Instance().GetMemorySnapshotInfoByPid(pid, info);
    if (isGpu) {
        info.gpuMemory = overflowMemory;
    }
    int32_t uid;
    std::string bundleName;
    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
    MemoryOverReport(pid, info, bundleName, "RENDER_MEMORY_OVER_ERROR");
    RS_LOGE("RSMemoryOverflow pid[%{public}d] cpu[%{public}zu] gpu[%{public}zu]", pid, info.cpuMemory, info.gpuMemory);
}

void MemoryManager::MemoryOverReport(const pid_t pid, const MemorySnapshotInfo& info, const std::string& bundleName,
    const std::string& reportName)
{
    HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, reportName,
        OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "PID", pid,
        "BUNDLE_NAME", bundleName,
        "CPU_MEMORY", info.cpuMemory,
        "GPU_MEMORY", info.gpuMemory,
        "TOTAL_MEMORY", info.TotalMemory());
    RS_LOGW("RSMemoryOverReport pid[%d] bundleName[%{public}s] cpu[%{public}zu] gpu[%{public}zu] total[%{public}zu]",
        pid, bundleName.c_str(), info.cpuMemory, info.gpuMemory, info.TotalMemory());
}

void MemoryManager::TotalMemoryOverReport(const std::unordered_map<pid_t, MemorySnapshotInfo>& infoMap)
{
    std::ostringstream oss;
    for (const auto& info : infoMap) {
        oss << info.first << '_' << info.second.TotalMemory() << ' ';
    }
    HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, "RENDER_MEMORY_OVER_TOTAL_ERROR",
        OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "MEMORY_MSG", oss.str());
}

void MemoryManager::ErasePidInfo(const std::set<pid_t>& exitedPidSet)
{
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto pid : exitedPidSet) {
        pidInfo_.erase(pid);
    }
}

void MemoryManager::VmaDefragment(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("VmaDefragment fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("VmaDefragment");
    gpuContext->VmaDefragment();
#endif
}
} // namespace OHOS::Rosen