/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "memory/rs_memory_manager.h"

#include <fstream>
#include <malloc.h>
#include <sstream>
#include <string>
#include <sys/prctl.h>
#include "include/core/SkGraphics.h"
#include "rs_trace.h"
#include "third_party/cJSON/cJSON.h"

#include "memory/rs_dfx_string.h"
#include "skia_adapter/rs_skia_memory_tracer.h"
#include "skia_adapter/skia_graphics.h"
#include "memory/rs_memory_graphic.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/vk/GrVulkanTrackerInterface.h"
#include "src/gpu/GrDirectContextPriv.h"

#include "common/rs_background_thread.h"
#include "common/rs_obj_abs_geometry.h"
#include "common/rs_singleton.h"
#include "feature/uifirst/rs_sub_thread_manager.h"
#include "feature_cfg/feature_param/extend_feature/mem_param.h"
#include "feature_cfg/graphic_feature_param_manager.h"
#include "memory/rs_tag_tracker.h"
#include "pipeline/main_thread/rs_main_thread.h"
#include "pipeline/rs_surface_render_node.h"
#include "platform/common/rs_log.h"
#include "platform/common/rs_system_properties.h"

#include "app_mgr_client.h"
#include "hisysevent.h"
#include "image/gpu_context.h"
#include "platform/common/rs_hisysevent.h"

#ifdef RS_ENABLE_VK
#include "feature/gpuComposition/rs_vk_image_manager.h"
#include "platform/ohos/backend/rs_vulkan_context.h"
#endif
#ifdef RES_SCHED_ENABLE
#include "res_sched_client.h"
#include "res_sched_kill_reason.h"
#endif
static inline const char* GetThreadName()
{
    static constexpr int nameLen = 16;
    static thread_local char threadName[nameLen + 1] = "";
    if (threadName[0] == 0) {
        prctl(PR_GET_NAME, threadName);
        threadName[nameLen] = 0;
    }
    return threadName;
}

namespace OHOS::Rosen {
namespace {
const std::string KERNEL_CONFIG_PATH = "/system/etc/hiview/kernel_leak_config.json";
const std::string EVENT_ENTER_RECENTS = "GESTURE_TO_RECENTS";
constexpr uint32_t MEMUNIT_RATE = 1024;
constexpr uint32_t MEMORY_REPORT_INTERVAL = 24 * 60 * 60 * 1000; // Each process can report at most once a day.
constexpr uint32_t FRAME_NUMBER = 10; // Check memory every ten frames.
constexpr uint32_t CLEAR_TWO_APPS_TIME = 1000; // 1000ms
constexpr const char* MEM_RS_TYPE = "renderservice";
constexpr const char* MEM_CPU_TYPE = "cpu";
constexpr const char* MEM_GPU_TYPE = "gpu";
constexpr const char* MEM_JEMALLOC_TYPE = "jemalloc";
constexpr const char* MEM_SNAPSHOT = "snapshot";
constexpr int DUPM_STRING_BUF_SIZE = 4000;
}

std::mutex MemoryManager::mutex_;
std::unordered_map<pid_t, std::pair<std::string, uint64_t>> MemoryManager::pidInfo_;
uint32_t MemoryManager::frameCount_ = 0;
uint64_t MemoryManager::memoryWarning_ = UINT64_MAX;
uint64_t MemoryManager::gpuMemoryControl_ = UINT64_MAX;
uint64_t MemoryManager::totalMemoryReportTime_ = 0;

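// Dumps the memory categories selected by `type` into `log`; an empty type dumps every category
// (render service caches, Skia CPU caches, GPU caches, jemalloc stats, and per-process snapshots).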
void MemoryManager::DumpMemoryUsage(DfxString& log, std::string& type)
{
    if (type.empty() || type == MEM_RS_TYPE) {
        DumpRenderServiceMemory(log);
    }
    if (type.empty() || type == MEM_CPU_TYPE) {
        DumpDrawingCpuMemory(log);
    }
    if (type.empty() || type == MEM_GPU_TYPE) {
        RSUniRenderThread::Instance().DumpMem(log);
    }
    if (type.empty() || type == MEM_JEMALLOC_TYPE) {
        std::string out;
        DumpMallocStat(out);
        log.AppendFormat("%s\n... detail dump at hilog\n", out.c_str());
    }
    if (type.empty() || type == MEM_SNAPSHOT) {
        DumpMemorySnapshot(log);
    }
}

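// The helpers below release or purge GPU resources held by a Drawing::GPUContext, selected by
// resource tag, by pid, by exited-pid set, by node id, or globally. They are no-ops unless a GPU
// backend (RS_ENABLE_GL or RS_ENABLE_VK) is enabled; the Vulkan-specific switches further down
// additionally require RS_ENABLE_VK.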
void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseAllGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->ReleaseByTag(tag);
#endif
}

void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseAllGpuResource");
    ReleaseAllGpuResource(gpuContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->PurgeUnlockedResourcesByTag(false, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, std::set<pid_t> exitedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByPid fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource exitedPidSet size: %d", exitedPidSet.size());
    gpuContext->PurgeUnlockedResourcesByPid(false, exitedPidSet);
    MemorySnapshot::Instance().EraseSnapshotInfoByPid(exitedPidSet);
    ErasePidInfo(exitedPidSet);
#endif
}

void MemoryManager::PurgeCacheBetweenFrames(Drawing::GPUContext* gpuContext, bool scratchResourceOnly,
    std::set<pid_t>& exitedPidSet, std::set<pid_t>& protectedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("PurgeCacheBetweenFrames fail, gpuContext is nullptr");
        return;
    }
    gpuContext->PurgeCacheBetweenFrames(scratchResourceOnly, exitedPidSet, protectedPidSet);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, NodeId surfaceNodeId)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(ExtractPid(surfaceNodeId), 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag); // clear gpu resource by pid
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, bool scratchResourcesOnly)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource scratchResourcesOnly:%d", scratchResourcesOnly);
    gpuContext->PurgeUnlockedResources(scratchResourcesOnly);
#endif
}

void MemoryManager::ReleaseUnlockAndSafeCacheGpuResource(Drawing::GPUContext* gpuContext)
{
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseUnlockAndSafeCacheGpuResource fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockAndSafeCacheGpuResource");
    gpuContext->PurgeUnlockAndSafeCacheGpuResources();
#endif
}

void MemoryManager::SetGpuCacheSuppressWindowSwitch(Drawing::GPUContext* gpuContext, bool enabled)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuCacheSuppressWindowSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuCacheSuppressWindowSwitch(enabled);
#endif
}

void MemoryManager::SetGpuMemoryAsyncReclaimerSwitch(
    Drawing::GPUContext* gpuContext, bool enabled, const std::function<void()>& setThreadPriority)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuMemoryAsyncReclaimerSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
#endif
}

void MemoryManager::FlushGpuMemoryInWaitQueue(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("FlushGpuMemoryInWaitQueue fail, gpuContext is nullptr");
        return;
    }
    gpuContext->FlushGpuMemoryInWaitQueue();
#endif
}

void MemoryManager::SuppressGpuCacheBelowCertainRatio(
    Drawing::GPUContext* gpuContext, const std::function<bool(void)>& nextFrameHasArrived)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SuppressGpuCacheBelowCertainRatio fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SuppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}

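// Returns the GPU memory (in MB) attributable to applications: the total reported by the GPU
// memory tracer minus the portion tagged with render-service-internal tag types.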
float MemoryManager::GetAppGpuMemoryInMB(Drawing::GPUContext* gpuContext)
{
    if (!gpuContext) {
        return 0.f;
    }
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::TraceMemoryDump trace("category", true);
    gpuContext->DumpMemoryStatistics(&trace);
    auto total = trace.GetGpuMemorySizeInMB();
    float rsMemSize = 0.f;
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        Drawing::GPUResourceTag resourceTag(0, 0, 0, tagtype,
            RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype)));
        Drawing::TraceMemoryDump gpuTrace("category", true);
        gpuContext->DumpMemoryStatisticsByTag(&gpuTrace, resourceTag);
        rsMemSize += gpuTrace.GetGpuMemorySizeInMB();
    }
    return total - rsMemSize;
#else
    return 0.f;
#endif
}

void MemoryManager::DumpPidMemory(DfxString& log, int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic mem = CountPidMemory(pid, gpuContext);
    log.AppendFormat("GPU Mem(MB):%f\n", mem.GetGpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    log.AppendFormat("CPU Mem(MB):%f\n", mem.GetCpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    log.AppendFormat("Total Mem(MB):%f\n", mem.GetTotalMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
}

MemoryGraphic MemoryManager::CountPidMemory(int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic totalMemGraphic;

    // Count mem of RS
    totalMemGraphic.SetPid(pid);

#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    // Count mem of Skia GPU
    if (gpuContext) {
        Drawing::TraceMemoryDump gpuTracer("category", true);
        Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
        gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, tag);
        float gpuMem = gpuTracer.GetGLMemorySize();
        totalMemGraphic.IncreaseGpuMemory(gpuMem);
    }
#endif

    return totalMemGraphic;
}

void MemoryManager::CountMemory(
    std::vector<pid_t> pids, const Drawing::GPUContext* gpuContext, std::vector<MemoryGraphic>& mems)
{
    auto countMem = [&gpuContext, &mems] (pid_t pid) {
        mems.emplace_back(CountPidMemory(pid, gpuContext));
    };
    // Count mem of Skia GPU
    std::for_each(pids.begin(), pids.end(), countMem);
}

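// Maps a node id to the window that contains it: records the node's absolute rect, then walks up
// the render-node tree (bounded by maxTreeDepth) until an RSSurfaceRenderNode is found, returning
// that surface's id and name.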
static std::tuple<uint64_t, std::string, RectI> FindGeoById(uint64_t nodeId)
{
    constexpr int maxTreeDepth = 256;
    const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
    auto node = nodeMap.GetRenderNode<RSRenderNode>(nodeId);
    uint64_t windowId = nodeId;
    std::string windowName = "NONE";
    RectI nodeFrameRect;
    if (!node) {
        return { windowId, windowName, nodeFrameRect };
    }
    nodeFrameRect =
        (node->GetRenderProperties().GetBoundsGeometry())->GetAbsRect();
    // Obtain the window according to childId
    auto parent = node->GetParent().lock();
    bool windowsNameFlag = false;
    int seekDepth = 0;
    while (parent && seekDepth < maxTreeDepth) {
        if (parent->IsInstanceOf<RSSurfaceRenderNode>()) {
            const auto& surfaceNode = RSBaseRenderNode::ReinterpretCast<RSSurfaceRenderNode>(parent);
            windowName = surfaceNode->GetName();
            windowId = surfaceNode->GetId();
            windowsNameFlag = true;
            break;
        }
        parent = parent->GetParent().lock();
        seekDepth++;
    }
    if (!windowsNameFlag) {
        windowName = "EXISTS-BUT-NO-SURFACE";
    }
    return { windowId, windowName, nodeFrameRect };
}

void MemoryManager::DumpRenderServiceMemory(DfxString& log)
{
    log.AppendFormat("\n----------\nRenderService caches:\n");
    MemoryTrack::Instance().DumpMemoryStatistics(log, FindGeoById);
    RSMainThread::Instance()->RenderServiceAllNodeDump(log);
    RSMainThread::Instance()->RenderServiceAllSurafceDump(log);
}

void MemoryManager::DumpDrawingCpuMemory(DfxString& log)
{
    // CPU
    std::string cpuInfo = "Skia CPU caches : pid:" + std::to_string(getpid()) +
        ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        cpuInfo = cpuInfo + ", threadName: " + threadName;
    }
#endif
    log.AppendFormat("\n----------\n%s\n", cpuInfo.c_str());
    log.AppendFormat("Font Cache (CPU):\n");
    log.AppendFormat("  Size: %.2f kB \n", Drawing::SkiaGraphics::GetFontCacheUsed() / MEMUNIT_RATE);
    log.AppendFormat("  Glyph Count: %d \n", Drawing::SkiaGraphics::GetFontCacheCountUsed());

    std::vector<ResourcePair> cpuResourceMap = {
        { "skia/sk_resource_cache/bitmap_", "Bitmaps" },
        { "skia/sk_resource_cache/rrect-blur_", "Masks" },
        { "skia/sk_resource_cache/rects-blur_", "Masks" },
        { "skia/sk_resource_cache/tessellated", "Shadows" },
        { "skia/sk_resource_cache/yuv-planes_", "YUVPlanes" },
        { "skia/sk_resource_cache/budget_glyph_count", "Bitmaps" },
    };
    SkiaMemoryTracer cpuTracer(cpuResourceMap, true);
    Drawing::SkiaGraphics::DumpMemoryStatistics(&cpuTracer);
    log.AppendFormat("CPU Caches:\n");
    cpuTracer.LogOutput(log);
    log.AppendFormat("Total CPU memory usage:\n");
    cpuTracer.LogTotals(log);

    // cache limit
    size_t cacheLimit = Drawing::SkiaGraphics::GetResourceCacheTotalByteLimit();
    size_t fontCacheLimit = Drawing::SkiaGraphics::GetFontCacheLimit();
    log.AppendFormat("\ncpu cache limit = %zu ( fontcache = %zu ):\n", cacheLimit, fontCacheLimit);
}

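// Dumps Skia GPU cache statistics into `log`. With a non-null `tag` only resources carrying that
// tag are counted; otherwise the whole context is dumped, plus Vulkan memory stats when available.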
void MemoryManager::DumpGpuCache(
    DfxString& log, const Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag* tag, std::string& name)
{
    if (!gpuContext) {
        log.AppendFormat("gpuContext is nullptr.\n");
        return;
    }
    /* GPU */
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    log.AppendFormat("\n---------------\nSkia GPU Caches:%s\n", name.c_str());
    Drawing::TraceMemoryDump gpuTracer("category", true);
    if (tag) {
        gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, *tag);
    } else {
        gpuContext->DumpMemoryStatistics(&gpuTracer);
#ifdef RS_ENABLE_VK
        RsVulkanMemStat& memStat = RsVulkanContext::GetSingleton().GetRsVkMemStat();
        memStat.DumpMemoryStatistics(&gpuTracer);
#endif
    }
    gpuTracer.LogOutput(log);
    log.AppendFormat("Total GPU memory usage:\n");
    gpuTracer.LogTotals(log);
#endif
}

void MemoryManager::DumpAllGpuInfo(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
#if defined (RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    for (auto& nodeTag : nodeTags) {
        Drawing::GPUResourceTag tag(ExtractPid(nodeTag.first), 0, nodeTag.first, 0, nodeTag.second);
        DumpGpuCache(log, gpuContext, &tag, nodeTag.second);
    }
#endif
}

void MemoryManager::DumpDrawingGpuMemory(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
    /* GPU */
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    std::string gpuInfo = "pid:" + std::to_string(getpid()) + ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        gpuInfo = gpuInfo + ", threadName: " + threadName;
    }
#endif
    // total
    DumpGpuCache(log, gpuContext, nullptr, gpuInfo);
    // Get memory of window by tag
    DumpAllGpuInfo(log, gpuContext, nodeTags);
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        std::string tagTypeName = RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype));
        Drawing::GPUResourceTag tag(0, 0, 0, tagtype, tagTypeName);
        DumpGpuCache(log, gpuContext, &tag, tagTypeName);
    }
    // cache limit
    size_t cacheLimit = 0;
    size_t cacheUsed = 0;
    gpuContext->GetResourceCacheLimits(nullptr, &cacheLimit);
    gpuContext->GetResourceCacheUsage(nullptr, &cacheUsed);
    log.AppendFormat("\ngpu limit = %zu ( used = %zu ):\n", cacheLimit, cacheUsed);

    /* ShaderCache */
    log.AppendFormat("\n---------------\nShader Caches:\n");
    std::shared_ptr<RenderContext> rendercontext = std::make_shared<RenderContext>();
    log.AppendFormat("%s", rendercontext->GetShaderCacheSize().c_str());
    // gpu stat
    DumpGpuStats(log, gpuContext);
#endif
}

void MemoryManager::DumpGpuStats(DfxString& log, const Drawing::GPUContext* gpuContext)
{
    log.AppendFormat("\n---------------\ndumpGpuStats:\n");
    std::string stat;
    gpuContext->DumpGpuStats(stat);

    size_t statIndex = 0;
    size_t statLength = stat.length();
    while (statIndex < statLength) {
        std::string statSubStr;
        if (statLength - statIndex > DUPM_STRING_BUF_SIZE) {
            statSubStr = stat.substr(statIndex, DUPM_STRING_BUF_SIZE);
            statIndex += DUPM_STRING_BUF_SIZE;
        } else {
            statSubStr = stat.substr(statIndex, statLength - statIndex);
            statIndex = statLength;
        }
        log.AppendFormat("%s", statSubStr.c_str());
    }
    log.AppendFormat("\ndumpGpuStats end\n---------------\n");
#if defined (SK_VULKAN) && defined (SKIA_DFX_FOR_RECORD_VKIMAGE)
    if (ParallelDebug::IsVkImageDfxEnabled()) {
        static thread_local int tid = gettid();
        log.AppendFormat("\n------------------\n[%s:%d] dumpAllResource:\n", GetThreadName(), tid);
        std::stringstream allResources;
        gpuContext->DumpAllResource(allResources);
        std::string s;
        while (std::getline(allResources, s, '\n')) {
            log.AppendFormat("%s\n", s.c_str());
        }
    }
#endif
}

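// Extracts a compact summary from jemalloc's stats text: only the "tcache_bytes", "decaying",
// "dirty" and "muzzy" lines are kept, and scanning stops after the first occurrence of each so the
// per-thread repetitions that follow the process-wide section are skipped.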
void ProcessJemallocString(std::string* sp, const char* str)
{
    sp->append("strbuf size = " + std::to_string(strlen(str)) + "\n");
    // split ///////////////////////////////
    std::vector<std::string> lines;
    std::string currentLine;

    for (int i = 0; str[i] != '\0' && i < INT_MAX; ++i) {
        if (str[i] == '\n') {
            lines.push_back(currentLine);
            currentLine.clear();
        } else {
            currentLine += str[i];
        }
    }
    // last line
    if (!currentLine.empty()) {
        lines.push_back(currentLine);
    }

    // compute tcache and decay free ///////////////////////
    // tcache_bytes:                     784
    // decaying:  time       npages       sweeps     madvises       purged
    //   dirty:   N/A           94         5084        55957       295998
    //   muzzy:   N/A            0         3812        39219       178519
    const char* strArray[] = {"tcache_bytes:", "decaying:", "   dirty:", "   muzzy:"};
    size_t size = sizeof(strArray) / sizeof(strArray[0]);
    size_t total = 0;
    for (const auto& line : lines) {
        for (size_t i = 0; i < size; ++i) {
            if (strncmp(line.c_str(), strArray[i], strlen(strArray[i])) == 0) {
                sp->append(line + "\n");
                total++;
            }
        }

        // get first one: (the total one, others are separated by threads)
        if (total >= size) {
            break;
        }
    }
}

void MemoryManager::DumpMallocStat(std::string& log)
{
    log.append("malloc stats :\n");

    malloc_stats_print(
        [](void* fp, const char* str) {
            if (!fp) {
                RS_LOGE("DumpMallocStat fp is nullptr");
                return;
            }
            std::string* sp = static_cast<std::string*>(fp);
            if (str) {
                ProcessJemallocString(sp, str);
                RS_LOGW("[mallocstat]:%{public}s", str);
            }
        },
        &log, nullptr);
}

void MemoryManager::DumpMemorySnapshot(DfxString& log)
{
    size_t totalMemory = MemorySnapshot::Instance().GetTotalMemory();
    log.AppendFormat("\n---------------\nmemorySnapshots, totalMemory %zuKB\n", totalMemory / MEMUNIT_RATE);
    std::unordered_map<pid_t, MemorySnapshotInfo> memorySnapshotInfo;
    MemorySnapshot::Instance().GetMemorySnapshot(memorySnapshotInfo);
    for (auto& [pid, snapshotInfo] : memorySnapshotInfo) {
        std::string infoStr = "pid: " + std::to_string(pid) +
            ", cpu: " + std::to_string(snapshotInfo.cpuMemory / MEMUNIT_RATE) +
            "KB, gpu: " + std::to_string(snapshotInfo.gpuMemory / MEMUNIT_RATE) + "KB";
        log.AppendFormat("%s\n", infoStr.c_str());
    }
}

uint64_t ParseMemoryLimit(const cJSON* json, const char* name)
{
    cJSON* jsonItem = cJSON_GetObjectItem(json, name);
    if (jsonItem != nullptr && cJSON_IsNumber(jsonItem)) {
        return static_cast<uint64_t>(jsonItem->valueint) * MEMUNIT_RATE * MEMUNIT_RATE;
    }
    return UINT64_MAX;
}

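// InitMemoryLimit() below reads the per-process and render-service-wide memory thresholds from the
// kernel leak config file. The parsing implies a JSON layout of roughly the following shape
// (threshold values are in MB; this sketch is illustrative, not normative):
// {
//     "KernelLeak": {
//         "<version type>": {
//             "<rsWatchPoint>": {
//                 "process_warning_threshold": ...,
//                 "process_cpu_control_threshold": ...,
//                 "process_gpu_control_threshold": ...,
//                 "total_threshold": ...
//             }
//         }
//     }
// }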
void MemoryManager::InitMemoryLimit()
{
    auto featureParam = GraphicFeatureParamManager::GetInstance().GetFeatureParam(FEATURE_CONFIGS[MEM]);
    if (!featureParam) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not get mem featureParam");
        return;
    }
    std::string rsWatchPointParamName = std::static_pointer_cast<MEMParam>(featureParam)->GetRSWatchPoint();
    if (rsWatchPointParamName.empty()) {
        RS_LOGI("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        return;
    }

    std::ifstream configFile;
    configFile.open(KERNEL_CONFIG_PATH);
    if (!configFile.is_open()) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not open config file");
        return;
    }
    std::stringstream filterParamsStream;
    filterParamsStream << configFile.rdbuf();
    configFile.close();
    std::string paramsString = filterParamsStream.str();

    cJSON* root = cJSON_Parse(paramsString.c_str());
    if (root == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not parse config to json");
        return;
    }
    cJSON* kernelLeak = cJSON_GetObjectItem(root, "KernelLeak");
    if (kernelLeak == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find kernelLeak");
        cJSON_Delete(root);
        return;
    }
    cJSON* version = cJSON_GetObjectItem(kernelLeak, RSSystemProperties::GetVersionType().c_str());
    if (version == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find version");
        cJSON_Delete(root);
        return;
    }
    cJSON* rsWatchPoint = cJSON_GetObjectItem(version, rsWatchPointParamName.c_str());
    if (rsWatchPoint == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        cJSON_Delete(root);
        return;
    }
    // warning threshold for total memory of a single process
    memoryWarning_ = ParseMemoryLimit(rsWatchPoint, "process_warning_threshold");
    // error threshold for cpu memory of a single process
    uint64_t cpuMemoryControl = ParseMemoryLimit(rsWatchPoint, "process_cpu_control_threshold");
    // error threshold for gpu memory of a single process
    gpuMemoryControl_ = ParseMemoryLimit(rsWatchPoint, "process_gpu_control_threshold");
    // threshold for the total memory of all processes in renderservice
    uint64_t totalMemoryWarning = ParseMemoryLimit(rsWatchPoint, "total_threshold");
    cJSON_Delete(root);

    MemorySnapshot::Instance().InitMemoryLimit(MemoryOverflow, memoryWarning_, cpuMemoryControl, totalMemoryWarning);
}

void MemoryManager::SetGpuMemoryLimit(Drawing::GPUContext* gpuContext)
{
    if (gpuContext == nullptr || gpuMemoryControl_ == UINT64_MAX) {
        RS_LOGW("MemoryManager::SetGpuMemoryLimit gpuContext is nullptr or gpuMemoryControl_ is uninitialized");
        return;
    }
    gpuContext->InitGpuMemoryLimit(MemoryOverflow, gpuMemoryControl_);
}

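// Runs once every FRAME_NUMBER frames: fetches the per-pid GPU memory map, then, on the background
// thread, reports a render-service-wide overflow at most once per MEMORY_REPORT_INTERVAL and
// reports each process whose total memory exceeds memoryWarning_, rate-limited per pid.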
void MemoryManager::MemoryOverCheck(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    frameCount_++;
    if (!gpuContext || frameCount_ < FRAME_NUMBER) {
        return;
    }
    frameCount_ = 0;
    std::unordered_map<pid_t, size_t> gpuMemory;
    gpuContext->GetUpdatedMemoryMap(gpuMemory);

    auto task = [gpuMemory = std::move(gpuMemory)]() {
        std::unordered_map<pid_t, MemorySnapshotInfo> infoMap;
        bool isTotalOver = false;
        MemorySnapshot::Instance().UpdateGpuMemoryInfo(gpuMemory, infoMap, isTotalOver);
        auto now = std::chrono::steady_clock::now().time_since_epoch();
        uint64_t currentTime = std::chrono::duration_cast<std::chrono::milliseconds>(now).count();
        // total memory overflow of all processes in renderservice
        if (isTotalOver && currentTime > totalMemoryReportTime_) {
            TotalMemoryOverReport(infoMap);
            totalMemoryReportTime_ = currentTime + MEMORY_REPORT_INTERVAL;
        }

        std::string bundleName;
        bool needReport = false;
        for (const auto& [pid, memoryInfo] : infoMap) {
            if (memoryInfo.TotalMemory() <= memoryWarning_) {
                continue;
            }
            needReport = false;
            {
                std::lock_guard<std::mutex> lock(mutex_);
                auto it = pidInfo_.find(pid);
                if (it == pidInfo_.end()) {
                    int32_t uid;
                    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
                    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
                    pidInfo_.emplace(pid, std::make_pair(bundleName, currentTime + MEMORY_REPORT_INTERVAL));
                    needReport = true;
                } else if (currentTime > it->second.second) {
                    it->second.second = currentTime + MEMORY_REPORT_INTERVAL;
                    bundleName = it->second.first;
                    needReport = true;
                }
            }
            if (needReport) {
                MemoryOverReport(pid, memoryInfo, bundleName, RSEventName::RENDER_MEMORY_OVER_WARNING);
            }
        }
    };
    RSBackgroundThread::Instance().PostTask(task);
#endif
}

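// Asks the resource scheduler to kill the offending process and, if the kill succeeds, writes a
// PROCESS_KILL fault event; compiled in only when RES_SCHED_ENABLE is defined.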
static void KillProcessByPid(const pid_t pid, const std::string& processName, const std::string& reason)
{
#ifdef RES_SCHED_ENABLE
    std::unordered_map<std::string, std::string> killInfo;
    killInfo["pid"] = std::to_string(pid);
    killInfo["processName"] = processName;
    killInfo["killReason"] = reason;
    if (pid > 0) {
        int32_t eventWriteStatus = -1;
        int32_t killStatus = ResourceSchedule::ResSchedClient::GetInstance().KillProcess(killInfo);
        if (killStatus == 0) {
            RS_TRACE_NAME("KillProcessByPid HiSysEventWrite");
            eventWriteStatus = HiSysEventWrite(HiviewDFX::HiSysEvent::Domain::FRAMEWORK, "PROCESS_KILL",
                HiviewDFX::HiSysEvent::EventType::FAULT, "PID", pid, "PROCESS_NAME", processName,
                "MSG", reason, "FOREGROUND", false);
        }
        // To prevent the print from being filtered, use RS_LOGE.
        RS_LOGE("KillProcessByPid, pid: %{public}d, process name: %{public}s, "
            "killStatus: %{public}d, eventWriteStatus: %{public}d, reason: %{public}s",
            static_cast<int32_t>(pid), processName.c_str(), killStatus, eventWriteStatus, reason.c_str());
    }
#endif
}

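// Callback installed via MemorySnapshot::InitMemoryLimit and GPUContext::InitGpuMemoryLimit: when a
// process exceeds its error threshold, dump a memory snapshot to the log, report
// RENDER_MEMORY_OVER_ERROR, and request that the process be killed.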
void MemoryManager::MemoryOverflow(pid_t pid, size_t overflowMemory, bool isGpu)
{
    MemorySnapshotInfo info;
    MemorySnapshot::Instance().GetMemorySnapshotInfoByPid(pid, info);
    if (isGpu) {
        info.gpuMemory = overflowMemory;
    }
    int32_t uid;
    std::string bundleName;
    auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
    appMgrClient.GetBundleNameByPid(pid, bundleName, uid);
    RSMainThread::Instance()->PostTask([]() {
        RS_TRACE_NAME_FMT("RSMem Dump Task");
        std::unordered_set<std::u16string> argSets;
        std::string dumpString = "";
        std::string type = MEM_SNAPSHOT;
        RSMainThread::Instance()->DumpMem(argSets, dumpString, type, 0);
        RS_LOGI("=======================RSMem Dump Info=======================");
        std::istringstream stream(dumpString);
        std::string line;
        while (std::getline(stream, line)) {
            RS_LOGI("%{public}s", line.c_str());
        }
        RS_LOGI("=============================================================");
    });
    std::string reason = "RENDER_MEMORY_OVER_ERROR: cpu[" + std::to_string(info.cpuMemory)
        + "], gpu[" + std::to_string(info.gpuMemory) + "], total["
        + std::to_string(info.TotalMemory()) + "]";
    MemoryOverReport(pid, info, bundleName, RSEventName::RENDER_MEMORY_OVER_ERROR);
    KillProcessByPid(pid, bundleName, reason);
    RS_LOGE("RSMemoryOverflow pid[%{public}d] cpu[%{public}zu] gpu[%{public}zu]", pid, info.cpuMemory, info.gpuMemory);
}

void MemoryManager::MemoryOverReport(const pid_t pid, const MemorySnapshotInfo& info, const std::string& bundleName,
    const std::string& reportName)
{
    RS_TRACE_NAME("MemoryManager::MemoryOverReport HiSysEventWrite");
    int ret = RSHiSysEvent::EventWrite(reportName, RSEventType::RS_STATISTIC,
        "PID", pid,
        "BUNDLE_NAME", bundleName,
        "CPU_MEMORY", info.cpuMemory,
        "GPU_MEMORY", info.gpuMemory,
        "TOTAL_MEMORY", info.TotalMemory());
    RS_LOGW("RSMemoryOverReport pid[%{public}d] bundleName[%{public}s] cpu[%{public}zu] "
        "gpu[%{public}zu] total[%{public}zu] ret[%{public}d]",
        pid, bundleName.c_str(), info.cpuMemory, info.gpuMemory, info.TotalMemory(), ret);
}

void MemoryManager::TotalMemoryOverReport(const std::unordered_map<pid_t, MemorySnapshotInfo>& infoMap)
{
    std::ostringstream oss;
    for (const auto& info : infoMap) {
        oss << info.first << '_' << info.second.TotalMemory() << ' ';
    }
    HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, "RENDER_MEMORY_OVER_TOTAL_ERROR",
        OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "MEMORY_MSG", oss.str());
}

void MemoryManager::ErasePidInfo(const std::set<pid_t>& exitedPidSet)
{
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto pid : exitedPidSet) {
        pidInfo_.erase(pid);
    }
}

void MemoryManager::VmaDefragment(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("VmaDefragment fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("VmaDefragment");
    gpuContext->VmaDefragment();
#endif
}

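// Sums the memory still attributed to an exiting process: node/pixelmap memory from MemoryTrack,
// modifier sizes collected on the main thread, and GPU memory collected on the uni-render thread,
// then appends the per-category and total sizes to `log`.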
void MemoryManager::DumpExitPidMem(std::string& log, int pid)
{
    RS_TRACE_NAME_FMT("DumpExitPidMem");
    DfxString dfxlog;
    auto mem = MemoryTrack::Instance().CountRSMemory(pid);
    size_t allNodeAndPixelmapSize = mem.GetTotalMemorySize();
    dfxlog.AppendFormat("allNodeAndPixelmapSize: %zu \n", allNodeAndPixelmapSize);

    size_t allModifySize = 0;
    RSMainThread::Instance()->ScheduleTask([pid, &allModifySize] () {
        const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
        nodeMap.TraversalNodesByPid(pid, [&allModifySize] (const std::shared_ptr<RSBaseRenderNode>& node) {
            allModifySize += node->GetAllModifierSize();
        });
    }).wait();
    dfxlog.AppendFormat("allModifySize: %zu \n", allModifySize);

    size_t allGpuSize = 0;
    RSUniRenderThread::Instance().PostSyncTask([&allGpuSize, pid] {
        MemoryGraphic mem = CountPidMemory(pid,
            RSUniRenderThread::Instance().GetRenderEngine()->GetRenderContext()->GetDrGPUContext());
        allGpuSize += static_cast<size_t>(mem.GetGpuMemorySize());
    });
    dfxlog.AppendFormat("allGpuSize: %zu \n", allGpuSize);
    dfxlog.AppendFormat("pid: %d totalSize: %zu \n", pid, (allNodeAndPixelmapSize + allModifySize + allGpuSize));
    log.append(dfxlog.GetString());
}

RSReclaimMemoryManager& RSReclaimMemoryManager::Instance()
{
    static RSReclaimMemoryManager instance;
    return instance;
}

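// Posts a memory reclaim when two "clear application" events arrive within CLEAR_TWO_APPS_TIME
// milliseconds; the uni-render thread's time-to-reclaim flag keeps the task from being retriggered
// while a reclaim is pending.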
void RSReclaimMemoryManager::TriggerReclaimTask()
{
    // If two applications are cleared within one second, post a task to reclaim memory.
    auto& unirenderThread = RSUniRenderThread::Instance();
    if (!unirenderThread.IsTimeToReclaim()) {
        static std::chrono::steady_clock::time_point lastClearAppTime = std::chrono::steady_clock::now();
        auto currentTime = std::chrono::steady_clock::now();
        bool isTimeToReclaim = std::chrono::duration_cast<std::chrono::milliseconds>(
            currentTime - lastClearAppTime).count() < CLEAR_TWO_APPS_TIME;
        if (isTimeToReclaim) {
            unirenderThread.ReclaimMemory();
            unirenderThread.SetTimeToReclaim(true);
            isReclaimInterrupt_.store(false);
        }
        lastClearAppTime = currentTime;
    }
}

void RSReclaimMemoryManager::InterruptReclaimTask(const std::string& sceneId)
{
    // When operating in the launcher, interrupt the reclaim task.
    if (!isReclaimInterrupt_.load() && sceneId != EVENT_ENTER_RECENTS) {
        isReclaimInterrupt_.store(true);
    }
}

} // namespace OHOS::Rosen