/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "memory/rs_memory_manager.h"

#include <dirent.h>
#include <filesystem>
#include <fstream>
#include <malloc.h>
#include <sstream>
#include <string>
#include <sys/prctl.h>
#include "include/core/SkGraphics.h"
#include "rs_trace.h"
#include "cJSON.h"

#include "memory/rs_dfx_string.h"
#include "skia_adapter/rs_skia_memory_tracer.h"
#include "skia_adapter/skia_graphics.h"
#include "memory/rs_memory_graphic.h"
#ifdef USE_M133_SKIA
#include "include/gpu/ganesh/GrDirectContext.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#else
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#endif
#include "include/gpu/vk/GrVulkanTrackerInterface.h"

#include "common/rs_background_thread.h"
#include "common/rs_obj_abs_geometry.h"
#include "common/rs_singleton.h"
#include "feature/uifirst/rs_sub_thread_manager.h"
#include "feature_cfg/feature_param/extend_feature/mem_param.h"
#include "feature_cfg/graphic_feature_param_manager.h"
#include "memory/rs_tag_tracker.h"
#include "pipeline/main_thread/rs_main_thread.h"
#include "pipeline/rs_surface_render_node.h"
#include "platform/common/rs_log.h"
#include "platform/common/rs_system_properties.h"

#include "app_mgr_client.h"
#include "hisysevent.h"
#include "image/gpu_context.h"
#include "platform/common/rs_hisysevent.h"

#ifdef RS_ENABLE_UNI_RENDER
#include "ability_manager_client.h"
#endif

#ifdef RS_ENABLE_VK
#include "feature/gpuComposition/rs_vk_image_manager.h"
#include "platform/ohos/backend/rs_vulkan_context.h"
#endif
static inline const char* GetThreadName()
{
    static constexpr int nameLen = 16;
    static thread_local char threadName[nameLen + 1] = "";
    if (threadName[0] == 0) {
        prctl(PR_GET_NAME, threadName);
        threadName[nameLen] = 0;
    }
    return threadName;
}

namespace OHOS::Rosen {
namespace {
const std::string KERNEL_CONFIG_PATH = "/system/etc/hiview/kernel_leak_config.json";
const std::string GPUMEM_INFO_PATH = "/proc/gpumem_process_info";
const std::string EVENT_ENTER_RECENTS = "GESTURE_TO_RECENTS";
const std::string GPU_RS_LEAK = "ResourceLeak(GpuRsLeak)";
constexpr uint32_t MEMUNIT_RATE = 1024;
constexpr uint32_t MEMORY_REPORT_INTERVAL = 24 * 60 * 60 * 1000; // Each process can report at most once a day.
constexpr uint32_t FRAME_NUMBER = 10; // Check memory every ten frames.
constexpr uint32_t CLEAR_TWO_APPS_TIME = 1000; // 1000ms
constexpr const char* MEM_RS_TYPE = "renderservice";
constexpr const char* MEM_CPU_TYPE = "cpu";
constexpr const char* MEM_GPU_TYPE = "gpu";
constexpr const char* MEM_SNAPSHOT = "snapshot";
constexpr int DUPM_STRING_BUF_SIZE = 4000;
constexpr int KILL_PROCESS_TYPE = 301;
constexpr int RETAIN_FILE_NUM = 10;
}

std::mutex MemoryManager::mutex_;
std::unordered_map<pid_t, uint64_t> MemoryManager::pidInfo_;
uint32_t MemoryManager::frameCount_ = 0;
uint64_t MemoryManager::memoryWarning_ = UINT64_MAX;
uint64_t MemoryManager::gpuMemoryControl_ = UINT64_MAX;
uint64_t MemoryManager::totalMemoryReportTime_ = 0;
std::unordered_set<pid_t> MemoryManager::processKillReportPidSet_;

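// Dispatches a memory dump by category: render service caches, Skia CPU caches,
// GPU caches (delegated to RSUniRenderThread) and per-process memory snapshots.
// An empty |type| dumps all of them.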
void MemoryManager::DumpMemoryUsage(DfxString& log, std::string& type)
{
    if (type.empty() || type == MEM_RS_TYPE) {
        DumpRenderServiceMemory(log);
    }
    if (type.empty() || type == MEM_CPU_TYPE) {
        DumpDrawingCpuMemory(log);
    }
    if (type.empty() || type == MEM_GPU_TYPE) {
        RSUniRenderThread::Instance().DumpMem(log);
    }
    if (type.empty() || type == MEM_SNAPSHOT) {
        DumpMemorySnapshot(log);
    }
}

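// Releases every GPU resource associated with |tag| (pid/tid/window/function id).
// The overloads below build a tag from a pid or node id and forward to this one.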
void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseAllGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->ReleaseByTag(tag);
#endif
}

void MemoryManager::ReleaseAllGpuResource(Drawing::GPUContext* gpuContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseAllGpuResource");
    ReleaseAllGpuResource(gpuContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag& tag)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource [Pid:%d Tid:%d Nid:%d Funcid:%d]",
        tag.fPid, tag.fTid, tag.fWid, tag.fFid);
    gpuContext->PurgeUnlockedResourcesByTag(false, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, std::set<pid_t> exitedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByPid fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource exitedPidSet size: %d", exitedPidSet.size());
    gpuContext->PurgeUnlockedResourcesByPid(false, exitedPidSet);
    MemorySnapshot::Instance().EraseSnapshotInfoByPid(exitedPidSet);
    ErasePidInfo(exitedPidSet);
#endif
}

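// Forwards to GPUContext::PurgeCacheBetweenFrames, which trims cached GPU resources
// between frames; the exited and protected pid sets steer what may be purged.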
void MemoryManager::PurgeCacheBetweenFrames(Drawing::GPUContext* gpuContext, bool scratchResourceOnly,
    std::set<pid_t>& exitedPidSet, std::set<pid_t>& protectedPidSet)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("PurgeCacheBetweenFrames fail, gpuContext is nullptr");
        return;
    }
    gpuContext->PurgeCacheBetweenFrames(scratchResourceOnly, exitedPidSet, protectedPidSet);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, NodeId surfaceNodeId)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(ExtractPid(surfaceNodeId), 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag);
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* grContext, pid_t pid)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
    ReleaseUnlockGpuResource(grContext, tag); // clear gpu resource by pid
#endif
}

void MemoryManager::ReleaseUnlockGpuResource(Drawing::GPUContext* gpuContext, bool scratchResourcesOnly)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseGpuResByTag fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockGpuResource scratchResourcesOnly:%d", scratchResourcesOnly);
    gpuContext->PurgeUnlockedResources(scratchResourcesOnly);
#endif
}

void MemoryManager::ReleaseUnlockAndSafeCacheGpuResource(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("ReleaseUnlockAndSafeCacheGpuResource fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("ReleaseUnlockAndSafeCacheGpuResource");
    gpuContext->PurgeUnlockAndSafeCacheGpuResources();
#endif
}

void MemoryManager::SetGpuCacheSuppressWindowSwitch(Drawing::GPUContext* gpuContext, bool enabled)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuCacheSuppressWindowSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuCacheSuppressWindowSwitch(enabled);
#endif
}

void MemoryManager::SetGpuMemoryAsyncReclaimerSwitch(
    Drawing::GPUContext* gpuContext, bool enabled, const std::function<void()>& setThreadPriority)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SetGpuMemoryAsyncReclaimerSwitch fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SetGpuMemoryAsyncReclaimerSwitch(enabled, setThreadPriority);
#endif
}

void MemoryManager::FlushGpuMemoryInWaitQueue(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("FlushGpuMemoryInWaitQueue fail, gpuContext is nullptr");
        return;
    }
    gpuContext->FlushGpuMemoryInWaitQueue();
#endif
}

void MemoryManager::SuppressGpuCacheBelowCertainRatio(
    Drawing::GPUContext* gpuContext, const std::function<bool(void)>& nextFrameHasArrived)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("SuppressGpuCacheBelowCertainRatio fail, gpuContext is nullptr");
        return;
    }
    gpuContext->SuppressGpuCacheBelowCertainRatio(nextFrameHasArrived);
#endif
}

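// Returns application-attributed GPU memory in MB: the total GPU cache size minus
// everything accounted to render-service-internal tag types
// (TAG_SAVELAYER_DRAW_NODE through TAG_CAPTURE).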
float MemoryManager::GetAppGpuMemoryInMB(Drawing::GPUContext* gpuContext)
{
    if (!gpuContext) {
        return 0.f;
    }
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    Drawing::TraceMemoryDump trace("category", true);
    gpuContext->DumpMemoryStatistics(&trace);
    auto total = trace.GetGpuMemorySizeInMB();
    float rsMemSize = 0.f;
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        Drawing::GPUResourceTag resourceTag(0, 0, 0, tagtype,
            RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype)));
        Drawing::TraceMemoryDump gpuTrace("category", true);
        gpuContext->DumpMemoryStatisticsByTag(&gpuTrace, resourceTag);
        rsMemSize += gpuTrace.GetGpuMemorySizeInMB();
    }
    return total - rsMemSize;
#else
    return 0.f;
#endif
}

void MemoryManager::DumpPidMemory(DfxString& log, int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic mem = CountPidMemory(pid, gpuContext);
    log.AppendFormat("GPU Mem(MB):%f\n", mem.GetGpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    // The divisor converts bytes to MB, matching the neighbouring lines, so label this MB as well.
    log.AppendFormat("CPU Mem(MB):%f\n", mem.GetCpuMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
    log.AppendFormat("Total Mem(MB):%f\n", mem.GetTotalMemorySize() / (MEMUNIT_RATE * MEMUNIT_RATE));
}

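// Builds a MemoryGraphic for |pid|; under GL/VK it adds the Skia GPU memory charged
// to that pid via NewDumpMemoryStatisticsByTag.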
MemoryGraphic MemoryManager::CountPidMemory(int pid, const Drawing::GPUContext* gpuContext)
{
    MemoryGraphic totalMemGraphic;

    // Count mem of RS
    totalMemGraphic.SetPid(pid);

#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    // Count mem of Skia GPU
    if (gpuContext) {
        Drawing::TraceMemoryDump gpuTracer("category", true);
        Drawing::GPUResourceTag tag(pid, 0, 0, 0, "ReleaseUnlockGpuResource");
        uint64_t totalGpuSize = gpuContext->NewDumpMemoryStatisticsByTag(&gpuTracer, tag);
        totalMemGraphic.IncreaseGpuMemory(totalGpuSize);
    }
#endif

    return totalMemGraphic;
}

void MemoryManager::CountMemory(
    std::vector<pid_t> pids, const Drawing::GPUContext* gpuContext, std::vector<MemoryGraphic>& mems)
{
    auto countMem = [&gpuContext, &mems] (pid_t pid) {
        mems.emplace_back(CountPidMemory(pid, gpuContext));
    };
    // Count mem of Skia GPU
    std::for_each(pids.begin(), pids.end(), countMem);
}

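// Resolves a render node id to its owning window: walks up the parent chain (at most
// 256 levels) until an RSSurfaceRenderNode is found, returning
// {windowId, windowName, absolute node rect, nodeIsNull}.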
static std::tuple<uint64_t, std::string, RectI, bool> FindGeoById(uint64_t nodeId)
{
    constexpr int maxTreeDepth = 256;
    const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
    auto node = nodeMap.GetRenderNode<RSRenderNode>(nodeId);
    uint64_t windowId = nodeId;
    std::string windowName = "NONE";
    RectI nodeFrameRect;
    if (!node) {
        return { windowId, windowName, nodeFrameRect, true };
    }
    nodeFrameRect =
        (node->GetRenderProperties().GetBoundsGeometry())->GetAbsRect();
    // Obtain the window according to childId
    auto parent = node->GetParent().lock();
    bool windowsNameFlag = false;
    int seekDepth = 0;
    while (parent && seekDepth < maxTreeDepth) {
        if (parent->IsInstanceOf<RSSurfaceRenderNode>()) {
            const auto& surfaceNode = RSBaseRenderNode::ReinterpretCast<RSSurfaceRenderNode>(parent);
            windowName = surfaceNode->GetName();
            windowId = surfaceNode->GetId();
            windowsNameFlag = true;
            break;
        }
        parent = parent->GetParent().lock();
        seekDepth++;
    }
    if (!windowsNameFlag) {
        windowName = "EXISTS-BUT-NO-SURFACE";
    }
    return { windowId, windowName, nodeFrameRect, false };
}

void MemoryManager::DumpRenderServiceMemory(DfxString& log)
{
    log.AppendFormat("\n----------\nRenderService caches:\n");
    MemoryTrack::Instance().DumpMemoryStatistics(log, FindGeoById);
    RSMainThread::Instance()->RenderServiceAllNodeDump(log);
    RSMainThread::Instance()->RenderServiceAllSurafceDump(log);
#ifdef RS_ENABLE_VK
    // The Vulkan memory statistics need a local tracer here; dump them and append the result to the log.
    Drawing::TraceMemoryDump gpuTracer("category", true);
    RsVulkanMemStat& memStat = RsVulkanContext::GetSingleton().GetRsVkMemStat();
    memStat.DumpMemoryStatistics(&gpuTracer);
    gpuTracer.LogOutput(log);
#endif
}

void MemoryManager::DumpDrawingCpuMemory(DfxString& log)
{
    // CPU
    std::string cpuInfo = "Skia CPU caches : pid:" + std::to_string(getpid()) +
        ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        cpuInfo = cpuInfo + ", threadName: " + threadName;
    }
#endif
    log.AppendFormat("\n----------\n%s\n", cpuInfo.c_str());
    log.AppendFormat("Font Cache (CPU):\n");
    log.AppendFormat(" Size: %.2f kB \n", Drawing::SkiaGraphics::GetFontCacheUsed() / MEMUNIT_RATE);
    log.AppendFormat(" Glyph Count: %d \n", Drawing::SkiaGraphics::GetFontCacheCountUsed());

    std::vector<ResourcePair> cpuResourceMap = {
        { "skia/sk_resource_cache/bitmap_", "Bitmaps" },
        { "skia/sk_resource_cache/rrect-blur_", "Masks" },
        { "skia/sk_resource_cache/rects-blur_", "Masks" },
        { "skia/sk_resource_cache/tessellated", "Shadows" },
        { "skia/sk_resource_cache/yuv-planes_", "YUVPlanes" },
        { "skia/sk_resource_cache/budget_glyph_count", "Bitmaps" },
    };
    SkiaMemoryTracer cpuTracer(cpuResourceMap, true);
    Drawing::SkiaGraphics::DumpMemoryStatistics(&cpuTracer);
401 log.AppendFormat("CPU Cachesxx:\n");
    cpuTracer.LogOutput(log);
    log.AppendFormat("Total CPU memory usage:\n");
    cpuTracer.LogTotals(log);

    // cache limit
    size_t cacheLimit = Drawing::SkiaGraphics::GetResourceCacheTotalByteLimit();
    size_t fontCacheLimit = Drawing::SkiaGraphics::GetFontCacheLimit();
    log.AppendFormat("\ncpu cache limit = %zu ( fontcache = %zu ):\n", cacheLimit, fontCacheLimit);
}

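// Dumps Skia GPU cache statistics into |log|, either for a single GPUResourceTag
// (when |tag| is non-null) or for the whole context, labelled with |name|.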
void MemoryManager::DumpGpuCache(
    DfxString& log, const Drawing::GPUContext* gpuContext, Drawing::GPUResourceTag* tag, std::string& name)
{
    if (!gpuContext) {
        log.AppendFormat("gpuContext is nullptr.\n");
        return;
    }
    /* GPU */
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    log.AppendFormat("\n---------------\nSkia GPU Caches:%s\n", name.c_str());
    Drawing::TraceMemoryDump gpuTracer("category", true);
    if (tag) {
        gpuContext->DumpMemoryStatisticsByTag(&gpuTracer, *tag);
    } else {
        gpuContext->DumpMemoryStatistics(&gpuTracer);
    }
    gpuTracer.LogOutput(log);
    log.AppendFormat("Total GPU memory usage:\n");
    gpuTracer.LogTotals(log);
#endif
}

void MemoryManager::DumpAllGpuInfo(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    for (auto& nodeTag : nodeTags) {
        Drawing::GPUResourceTag tag(ExtractPid(nodeTag.first), 0, nodeTag.first, 0, nodeTag.second);
        DumpGpuCache(log, gpuContext, &tag, nodeTag.second);
    }
#endif
}

void MemoryManager::DumpDrawingGpuMemory(DfxString& log, const Drawing::GPUContext* gpuContext,
    std::vector<std::pair<NodeId, std::string>>& nodeTags)
{
    if (!gpuContext) {
        log.AppendFormat("No valid gpu cache instance.\n");
        return;
    }
    /* GPU */
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    std::string gpuInfo = "pid:" + std::to_string(getpid()) + ", threadId:" + std::to_string(gettid());
#ifdef ROSEN_OHOS
    char threadName[16]; // thread name is restricted to 16 bytes
    auto result = pthread_getname_np(pthread_self(), threadName, sizeof(threadName));
    if (result == 0) {
        gpuInfo = gpuInfo + ", threadName: " + threadName;
    }
#endif
    // total
    DumpGpuCache(log, gpuContext, nullptr, gpuInfo);
    // Get memory of window by tag
    DumpAllGpuInfo(log, gpuContext, nodeTags);
    for (uint32_t tagtype = RSTagTracker::TAG_SAVELAYER_DRAW_NODE; tagtype <= RSTagTracker::TAG_CAPTURE; tagtype++) {
        std::string tagTypeName = RSTagTracker::TagType2String(static_cast<RSTagTracker::TAGTYPE>(tagtype));
        Drawing::GPUResourceTag tag(0, 0, 0, tagtype, tagTypeName);
        DumpGpuCache(log, gpuContext, &tag, tagTypeName);
    }
    // cache limit
    size_t cacheLimit = 0;
    size_t cacheUsed = 0;
    gpuContext->GetResourceCacheLimits(nullptr, &cacheLimit);
    gpuContext->GetResourceCacheUsage(nullptr, &cacheUsed);
    log.AppendFormat("\ngpu limit = %zu ( used = %zu ):\n", cacheLimit, cacheUsed);

    /* ShaderCache */
    log.AppendFormat("\n---------------\nShader Caches:\n");
    std::shared_ptr<RenderContext> rendercontext = std::make_shared<RenderContext>();
    log.AppendFormat(rendercontext->GetShaderCacheSize().c_str());
    // gpu stat
    DumpGpuStats(log, gpuContext);
#endif
}

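// Appends the driver/Skia GPU statistics string to |log|, splitting it into chunks of
// DUPM_STRING_BUF_SIZE characters so a single AppendFormat call is never overlong,
// then dumps the full resource list line by line.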
void MemoryManager::DumpGpuStats(DfxString& log, const Drawing::GPUContext* gpuContext)
{
    log.AppendFormat("\n---------------\ndumpGpuStats:\n");
    std::string stat;
    gpuContext->DumpGpuStats(stat);

    size_t statIndex = 0;
    size_t statLength = stat.length();
    while (statIndex < statLength) {
        std::string statSubStr;
        if (statLength - statIndex > DUPM_STRING_BUF_SIZE) {
            statSubStr = stat.substr(statIndex, DUPM_STRING_BUF_SIZE);
            statIndex += DUPM_STRING_BUF_SIZE;
        } else {
            statSubStr = stat.substr(statIndex, statLength - statIndex);
            statIndex = statLength;
        }
        log.AppendFormat("%s", statSubStr.c_str());
    }
    log.AppendFormat("\ndumpGpuStats end\n---------------\n");
    {
        static thread_local int tid = gettid();
        log.AppendFormat("\n------------------\n[%s:%d] dumpAllResource:\n", GetThreadName(), tid);
        std::stringstream allResources;
        gpuContext->DumpAllResource(allResources);
        std::string s;
        while (std::getline(allResources, s, '\n')) {
            log.AppendFormat("%s\n", s.c_str());
        }
    }
}

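// Extracts a compact summary from a jemalloc stats dump: splits |str| into lines and
// copies only the first "tcache_bytes"/"decaying"/"dirty"/"muzzy" block into |sp|
// (the totals; later per-thread blocks are skipped).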
void ProcessJemallocString(std::string* sp, const char* str)
{
    sp->append("strbuf size = " + std::to_string(strlen(str)) + "\n");
    // split ///////////////////////////////
    std::vector<std::string> lines;
    std::string currentLine;

    for (int i = 0; str[i] != '\0' && i < INT_MAX; ++i) {
        if (str[i] == '\n') {
            lines.push_back(currentLine);
            currentLine.clear();
        } else {
            currentLine += str[i];
        }
    }
    // last line
    if (!currentLine.empty()) {
        lines.push_back(currentLine);
    }

    // compute tcache and decay free ///////////////////////
    // tcache_bytes: 784
    // decaying: time npages sweeps madvises purged
    // dirty: N/A 94 5084 55957 295998
    // muzzy: N/A 0 3812 39219 178519
    const char* strArray[] = {"tcache_bytes:", "decaying:", " dirty:", " muzzy:"};
    size_t size = sizeof(strArray) / sizeof(strArray[0]);
    size_t total = 0;
    for (const auto& line : lines) {
        for (size_t i = 0; i < size; ++i) {
            if (strncmp(line.c_str(), strArray[i], strlen(strArray[i])) == 0) {
                sp->append(line + "\n");
                total++;
            }
        }

        // get first one: (the total one, others are separated by threads)
        if (total >= size) {
            break;
        }
    }
}

void MemoryManager::DumpMemorySnapshot(DfxString& log)
{
    size_t totalMemory = MemorySnapshot::Instance().GetTotalMemory();
    log.AppendFormat("\n---------------\nmemorySnapshots, totalMemory %zuKB\n", totalMemory / MEMUNIT_RATE);
    std::unordered_map<pid_t, MemorySnapshotInfo> memorySnapshotInfo;
    MemorySnapshot::Instance().GetMemorySnapshot(memorySnapshotInfo);
    for (auto& [pid, snapshotInfo] : memorySnapshotInfo) {
        std::string infoStr = "pid: " + std::to_string(pid) +
            ", uid: " + std::to_string(snapshotInfo.uid) + ", bundleName: " + snapshotInfo.bundleName +
            ", cpu: " + std::to_string(snapshotInfo.cpuMemory / MEMUNIT_RATE) +
            "KB, gpu: " + std::to_string(snapshotInfo.gpuMemory / MEMUNIT_RATE) + "KB";
        log.AppendFormat("%s\n", infoStr.c_str());
    }
}

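// Reads the numeric field |name| from |json| and converts it to bytes (the config
// value is presumably expressed in MB, hence the two MEMUNIT_RATE factors);
// returns UINT64_MAX, i.e. "no limit", when the field is missing or not a number.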
uint64_t ParseMemoryLimit(const cJSON* json, const char* name)
{
    cJSON* jsonItem = cJSON_GetObjectItem(json, name);
    if (jsonItem != nullptr && cJSON_IsNumber(jsonItem)) {
        return static_cast<uint64_t>(jsonItem->valueint) * MEMUNIT_RATE * MEMUNIT_RATE;
    }
    return UINT64_MAX;
}

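// Loads the per-process and total memory thresholds from the kernel leak config
// (KERNEL_CONFIG_PATH), keyed by version type and the RS watch-point name from the
// MEM feature parameters, and hands them to MemorySnapshot with MemoryOverflow as
// the overflow callback.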
void MemoryManager::InitMemoryLimit()
{
    auto featureParam = GraphicFeatureParamManager::GetInstance().GetFeatureParam(FEATURE_CONFIGS[MEM]);
    if (!featureParam) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not get mem featureParam");
        return;
    }
    std::string rsWatchPointParamName = std::static_pointer_cast<MEMParam>(featureParam)->GetRSWatchPoint();
    if (rsWatchPointParamName.empty()) {
        RS_LOGI("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        return;
    }

    std::ifstream configFile;
    configFile.open(KERNEL_CONFIG_PATH);
    if (!configFile.is_open()) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not open config file");
        return;
    }
    std::stringstream filterParamsStream;
    filterParamsStream << configFile.rdbuf();
    configFile.close();
    std::string paramsString = filterParamsStream.str();

    cJSON* root = cJSON_Parse(paramsString.c_str());
    if (root == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not parse config to json");
        return;
    }
    cJSON* kernelLeak = cJSON_GetObjectItem(root, "KernelLeak");
    if (kernelLeak == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find kernelLeak");
        cJSON_Delete(root);
        return;
    }
    cJSON* version = cJSON_GetObjectItem(kernelLeak, RSSystemProperties::GetVersionType().c_str());
    if (version == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find version");
        cJSON_Delete(root);
        return;
    }
    cJSON* rsWatchPoint = cJSON_GetObjectItem(version, rsWatchPointParamName.c_str());
    if (rsWatchPoint == nullptr) {
        RS_LOGE("MemoryManager::InitMemoryLimit can not find rsWatchPoint");
        cJSON_Delete(root);
        return;
    }
    // warning threshold for total memory of a single process
    memoryWarning_ = ParseMemoryLimit(rsWatchPoint, "process_warning_threshold");
    // error threshold for cpu memory of a single process
    uint64_t cpuMemoryControl = ParseMemoryLimit(rsWatchPoint, "process_cpu_control_threshold");
    // error threshold for gpu memory of a single process
    gpuMemoryControl_ = ParseMemoryLimit(rsWatchPoint, "process_gpu_control_threshold");
    // threshold for the total memory of all processes in renderservice
    uint64_t totalMemoryWarning = ParseMemoryLimit(rsWatchPoint, "total_threshold");
    cJSON_Delete(root);

    MemorySnapshot::Instance().InitMemoryLimit(MemoryOverflow, memoryWarning_, cpuMemoryControl, totalMemoryWarning);
}

void MemoryManager::SetGpuMemoryLimit(Drawing::GPUContext* gpuContext)
{
    if (gpuContext == nullptr || gpuMemoryControl_ == UINT64_MAX) {
        RS_LOGW("MemoryManager::SetGpuMemoryLimit gpuContext is nullptr or gpuMemoryControl_ is uninitialized");
        return;
    }
    gpuContext->InitGpuMemoryLimit(MemoryOverflow, gpuMemoryControl_);
}

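// Called per frame: every FRAME_NUMBER frames it refreshes the memory snapshot, pulls
// the per-pid GPU memory map from the context and posts a background task that merges
// it with sub-thread GPU usage and reports processes exceeding the configured limits.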
void MemoryManager::MemoryOverCheck(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    frameCount_++;
    if (!gpuContext || frameCount_ < FRAME_NUMBER) {
        return;
    }
    frameCount_ = 0;

    FillMemorySnapshot();
    std::unordered_map<pid_t, size_t> gpuMemory;
    gpuContext->GetUpdatedMemoryMap(gpuMemory);

    auto task = [gpuMemory = std::move(gpuMemory)]() {
        std::unordered_map<pid_t, MemorySnapshotInfo> infoMap;
        bool isTotalOver = false;
        std::unordered_map<pid_t, size_t> subThreadGpuMemoryOfPid;
        RSSubThreadManager::Instance()->GetGpuMemoryForReport(subThreadGpuMemoryOfPid);
        MemorySnapshot::Instance().UpdateGpuMemoryInfo(gpuMemory, subThreadGpuMemoryOfPid, infoMap, isTotalOver);
        MemoryOverForReport(infoMap, isTotalOver);
    };
    RSBackgroundThread::Instance().PostTask(task);
#endif
}

void MemoryManager::MemoryOverForReport(std::unordered_map<pid_t, MemorySnapshotInfo>& infoMap, bool isTotalOver)
{
#if defined(RS_ENABLE_GL) || defined(RS_ENABLE_VK)
    auto now = std::chrono::steady_clock::now().time_since_epoch();
    uint64_t currentTime =
        static_cast<uint64_t>(std::chrono::duration_cast<std::chrono::milliseconds>(now).count());
    // total memory overflow of all processes in renderservice
    if (isTotalOver && currentTime > totalMemoryReportTime_) {
        TotalMemoryOverReport(infoMap);
        totalMemoryReportTime_ = currentTime + MEMORY_REPORT_INTERVAL;
    }
    bool needReport = false;
    bool needReportKill = false;
    for (const auto& [pid, memoryInfo] : infoMap) {
        if (memoryInfo.TotalMemory() <= memoryWarning_) {
            continue;
        }
        needReport = false;
        needReportKill = false;
        {
            std::lock_guard<std::mutex> lock(mutex_);
            auto it = pidInfo_.find(pid);
            if (it == pidInfo_.end()) {
                pidInfo_.emplace(pid, currentTime + MEMORY_REPORT_INTERVAL);
                needReport = true;
            } else if (currentTime > it->second) {
                it->second = currentTime + MEMORY_REPORT_INTERVAL;
                needReport = true;
            }
            if (memoryInfo.gpuMemory + memoryInfo.subThreadGpuMemory > gpuMemoryControl_ &&
                processKillReportPidSet_.find(pid) == processKillReportPidSet_.end()) {
                needReportKill = true;
            }
        }
        if (needReport) {
            MemoryOverReport(pid, memoryInfo, RSEventName::RENDER_MEMORY_OVER_WARNING, "");
        }
        if (needReportKill) {
            MemoryOverflow(pid, memoryInfo.gpuMemory + memoryInfo.subThreadGpuMemory, true);
        }
    }
#endif
}

void MemoryManager::FillMemorySnapshot()
{
    std::vector<pid_t> pidList;
    MemorySnapshot::Instance().GetDirtyMemorySnapshot(pidList);
    if (pidList.size() == 0) {
        return;
    }

    std::unordered_map<pid_t, MemorySnapshotInfo> infoMap;
    for (auto& pid : pidList) {
        MemorySnapshotInfo& mInfo = infoMap[pid];
        auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
        appMgrClient.GetBundleNameByPid(pid, mInfo.bundleName, mInfo.uid);
    }
    MemorySnapshot::Instance().FillMemorySnapshot(infoMap);
}

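// Kills |pid| through the ability manager with a resource-control exit reason and, if
// the kill succeeded, writes a FRAMEWORK/PROCESS_KILL fault event; the result is
// always logged with RS_LOGE so it survives log filtering.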
static void KillProcessByPid(const pid_t pid, const MemorySnapshotInfo& info, const std::string& reason)
{
#ifdef RS_ENABLE_UNI_RENDER
    if (pid > 0) {
        int32_t eventWriteStatus = -1;
        AAFwk::ExitReason killReason{AAFwk::Reason::REASON_RESOURCE_CONTROL, KILL_PROCESS_TYPE, reason};
        int32_t ret = (int32_t)AAFwk::AbilityManagerClient::GetInstance()->KillProcessWithReason(pid, killReason);
        if (ret == ERR_OK) {
            RS_TRACE_NAME("KillProcessByPid HiSysEventWrite");
            eventWriteStatus = HiSysEventWrite(HiviewDFX::HiSysEvent::Domain::FRAMEWORK, "PROCESS_KILL",
                HiviewDFX::HiSysEvent::EventType::FAULT, "PID", pid, "PROCESS_NAME", info.bundleName,
                "MSG", reason, "FOREGROUND", false, "UID", info.uid, "BUNDLE_NAME", info.bundleName,
                "REASON", GPU_RS_LEAK);
        }
        // To prevent the print from being filtered, use RS_LOGE.
        RS_LOGE("KillProcessByPid, pid: %{public}d, process name: %{public}s, "
            "killStatus: %{public}d, eventWriteStatus: %{public}d, reason: %{public}s",
            static_cast<int32_t>(pid), info.bundleName.c_str(), ret, eventWriteStatus, reason.c_str());
    }
#endif
}

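// Overflow callback used by MemorySnapshot and the GPU memory limiter: dumps GPU
// memory on the main thread, files a RENDER_MEMORY_OVER_ERROR report, and kills the
// offending process, remembering its pid so the kill is only reported once.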
void MemoryManager::MemoryOverflow(pid_t pid, size_t overflowMemory, bool isGpu)
{
    if (pid == 0) {
        RS_LOGD("MemoryManager::MemoryOverflow pid = 0");
        return;
    }
    MemorySnapshotInfo info;
    MemorySnapshot::Instance().GetMemorySnapshotInfoByPid(pid, info);
    if (isGpu) {
        info.gpuMemory = overflowMemory;
    }
    RSMainThread::Instance()->PostTask([pid, info]() mutable {
        RS_TRACE_NAME_FMT("RSMem Dump Task");
        std::unordered_set<std::u16string> argSets;
        std::string dumpString = "";
        std::string type = MEM_GPU_TYPE;
        RSMainThread::Instance()->DumpMem(argSets, dumpString, type, 0);
        MemoryOverReport(pid, info, RSEventName::RENDER_MEMORY_OVER_ERROR, dumpString);
    });
    std::string reason = "RENDER_MEMORY_OVER_ERROR: cpu[" + std::to_string(info.cpuMemory)
        + "], gpu[" + std::to_string(info.gpuMemory) + "], total["
        + std::to_string(info.TotalMemory()) + "]";

    if (info.bundleName.empty()) {
        auto& appMgrClient = RSSingleton<AppExecFwk::AppMgrClient>::GetInstance();
        appMgrClient.GetBundleNameByPid(pid, info.bundleName, info.uid);
    }
    KillProcessByPid(pid, info, reason);
    {
        std::lock_guard<std::mutex> lock(mutex_);
        processKillReportPidSet_.emplace(pid);
    }

    RS_LOGE("RSMemoryOverflow pid[%{public}d] cpu[%{public}zu] gpu[%{public}zu]", pid, info.cpuMemory, info.gpuMemory);
}

void MemoryManager::MemoryOverReport(const pid_t pid, const MemorySnapshotInfo& info, const std::string& reportName,
    const std::string& hidumperReport)
{
    std::string gpuMemInfo;
    std::ifstream gpuMemInfoFile;
    gpuMemInfoFile.open(GPUMEM_INFO_PATH);
    if (gpuMemInfoFile.is_open()) {
        std::stringstream gpuMemInfoStream;
        gpuMemInfoStream << gpuMemInfoFile.rdbuf();
        gpuMemInfo = gpuMemInfoStream.str();
        gpuMemInfoFile.close();
    } else {
        gpuMemInfo = reportName;
        RS_LOGE("MemoryManager::MemoryOverReport can not open gpumem info");
    }

    RS_TRACE_NAME("MemoryManager::MemoryOverReport HiSysEventWrite");

    auto now = std::chrono::system_clock::now();
    uint64_t currentTime = static_cast<uint64_t>(std::chrono::system_clock::to_time_t(now));
    std::string filePath = "/data/service/el0/render_service/renderservice_mem_" +
        std::to_string(pid) + "_" + std::to_string(currentTime) + ".txt";
    WriteInfoToFile(filePath, gpuMemInfo, hidumperReport);

    int ret = RSHiSysEvent::EventWrite(reportName, RSEventType::RS_STATISTIC,
        "PID", pid,
        "BUNDLE_NAME", info.bundleName,
        "CPU_MEMORY", info.cpuMemory,
        "GPU_MEMORY", info.gpuMemory,
        "TOTAL_MEMORY", info.TotalMemory(),
        "GPU_PROCESS_INFO", filePath);
    RS_LOGW("hisysevent write result=%{public}d, send event [FRAMEWORK,PROCESS_KILL], "
        "pid[%{public}d] bundleName[%{public}s] cpu[%{public}zu] gpu[%{public}zu] total[%{public}zu]",
        ret, pid, info.bundleName.c_str(), info.cpuMemory, info.gpuMemory, info.TotalMemory());

    std::vector<std::string> needCleanFileName;
    if (NeedCleanNow(needCleanFileName)) {
        CleanFiles(needCleanFileName);
    }
}

void MemoryManager::WriteInfoToFile(std::string& filePath, std::string& gpuMemInfo, const std::string& hidumperReport)
{
    std::ofstream tempFile(filePath);
    if (tempFile.is_open()) {
        tempFile << "\n******************************\n";
        tempFile << gpuMemInfo;
        tempFile << "\n************ endl ************\n";
        tempFile.close();
    } else {
        RS_LOGE("MemoryOverReport::file open fail!");
    }
    if (!hidumperReport.empty()) {
        std::ofstream tempFile(filePath, std::ios::app);
        if (tempFile.is_open()) {
            tempFile << "\n******************************\n";
            tempFile << "LOGGER_RENDER_SERVICE_MEM\n";
            tempFile << hidumperReport;
            tempFile.close();
        } else {
            RS_LOGE("MemoryOverReport::file open fail!");
        }
    }
}

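// Scans /data/service/el0/render_service for "renderservice_mem_" dump files. Returns
// false while fewer than RETAIN_FILE_NUM exist; otherwise sorts them by the timestamp
// embedded in the file name and leaves only the oldest ones in |needCleanFileName|,
// so that the newest RETAIN_FILE_NUM files are kept on disk.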
bool MemoryManager::NeedCleanNow(std::vector<std::string>& needCleanFileName)
{
    const std::string filePath = "/data/service/el0/render_service";
    DIR* dir = opendir(filePath.c_str());
    if (dir == nullptr) {
        RS_LOGE("filePath open fail!");
        return false;
    }

    std::string prefix = "renderservice_mem_";
    size_t prefixLen = prefix.length();
    struct dirent* entry;
    int fileNum = 0;
    while ((entry = readdir(dir)) != nullptr) {
        if (entry->d_type == DT_REG) {
            std::string fileName = entry->d_name;
            if (fileName.substr(0, prefixLen) == prefix) {
                fileNum++;
                needCleanFileName.push_back(fileName);
            }
        }
    }
    closedir(dir); // release the directory handle opened above
    if (fileNum < RETAIN_FILE_NUM) {
        return false;
    }
    std::sort(needCleanFileName.begin(), needCleanFileName.end(), []
        (const std::string& firstFile, const std::string& secondFile) {
        int offset = 1;
        std::string firstFileTime = firstFile.substr(firstFile.rfind('_') + offset,
            firstFile.rfind('.') - firstFile.rfind('_') - offset);
        std::string secondFileTime = secondFile.substr(secondFile.rfind('_') + offset,
            secondFile.rfind('.') - secondFile.rfind('_') - offset);
        return std::stoi(firstFileTime) < std::stoi(secondFileTime);
    });
    needCleanFileName.erase(needCleanFileName.begin() + (needCleanFileName.size() - RETAIN_FILE_NUM),
        needCleanFileName.end());
    return true;
}

void MemoryManager::CleanFiles(std::vector<std::string>& needCleanFileName)
{
    for (const auto& entry : std::filesystem::directory_iterator("/data/service/el0/render_service")) {
        if (entry.is_regular_file()) {
            std::string fileName = entry.path().filename().string();
            if (std::find(needCleanFileName.begin(), needCleanFileName.end(), fileName) !=
                needCleanFileName.end()) {
                std::filesystem::remove(entry.path());
            }
        }
    }
}

void MemoryManager::TotalMemoryOverReport(const std::unordered_map<pid_t, MemorySnapshotInfo>& infoMap)
{
    std::ostringstream oss;
    for (const auto& info : infoMap) {
        oss << info.first << '_' << info.second.TotalMemory() << ' ';
    }
    HiSysEventWrite(OHOS::HiviewDFX::HiSysEvent::Domain::GRAPHIC, "RENDER_MEMORY_OVER_TOTAL_ERROR",
        OHOS::HiviewDFX::HiSysEvent::EventType::STATISTIC, "MEMORY_MSG", oss.str());
}

void MemoryManager::ErasePidInfo(const std::set<pid_t>& exitedPidSet)
{
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto pid : exitedPidSet) {
        pidInfo_.erase(pid);
        processKillReportPidSet_.erase(pid);
    }
}

void MemoryManager::VmaDefragment(Drawing::GPUContext* gpuContext)
{
#if defined(RS_ENABLE_VK)
    if (!gpuContext) {
        RS_LOGE("VmaDefragment fail, gpuContext is nullptr");
        return;
    }
    RS_TRACE_NAME_FMT("VmaDefragment");
    gpuContext->VmaDefragment();
#endif
}

void MemoryManager::DumpExitPidMem(std::string& log, int pid)
{
    RS_TRACE_NAME_FMT("DumpExitPidMem");
    DfxString dfxlog;
    auto mem = MemoryTrack::Instance().CountRSMemory(pid);
    size_t allNodeAndPixelmapSize = mem.GetTotalMemorySize();
    dfxlog.AppendFormat("allNodeAndPixelmapSize: %zu \n", allNodeAndPixelmapSize);

    size_t allModifySize = 0;
    RSMainThread::Instance()->ScheduleTask([pid, &allModifySize] () {
        const auto& nodeMap = RSMainThread::Instance()->GetContext().GetNodeMap();
        nodeMap.TraversalNodesByPid(pid, [&allModifySize] (const std::shared_ptr<RSBaseRenderNode>& node) {
            allModifySize += node->GetAllModifierSize();
        });
    }).wait();
    dfxlog.AppendFormat("allModifySize: %zu \n", allModifySize);

    size_t allGpuSize = 0;
    RSUniRenderThread::Instance().PostSyncTask([&allGpuSize, pid] {
        MemoryGraphic mem = CountPidMemory(pid,
            RSUniRenderThread::Instance().GetRenderEngine()->GetRenderContext()->GetDrGPUContext());
        allGpuSize += static_cast<size_t>(mem.GetGpuMemorySize());
    });
    dfxlog.AppendFormat("allGpuSize: %zu \n", allGpuSize);
    dfxlog.AppendFormat("pid: %d totalSize: %zu \n", pid, (allNodeAndPixelmapSize + allModifySize + allGpuSize));
    log.append(dfxlog.GetString());
}

RSReclaimMemoryManager& RSReclaimMemoryManager::Instance()
{
    static RSReclaimMemoryManager instance;
    return instance;
}

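// Posts a memory reclaim when the gap since the previous call is below
// CLEAR_TWO_APPS_TIME (1000 ms) and RSUniRenderThread has not already been flagged
// to reclaim; every call updates the remembered timestamp.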
void RSReclaimMemoryManager::TriggerReclaimTask()
{
    // If two applications are cleared within one second, post a task to reclaim memory.
    auto& unirenderThread = RSUniRenderThread::Instance();
    if (!unirenderThread.IsTimeToReclaim()) {
        static std::chrono::steady_clock::time_point lastClearAppTime = std::chrono::steady_clock::now();
        auto currentTime = std::chrono::steady_clock::now();
        bool isTimeToReclaim = std::chrono::duration_cast<std::chrono::milliseconds>(
            currentTime - lastClearAppTime).count() < CLEAR_TWO_APPS_TIME;
        if (isTimeToReclaim) {
            unirenderThread.ReclaimMemory();
            unirenderThread.SetTimeToReclaim(true);
            isReclaimInterrupt_.store(false);
        }
        lastClearAppTime = currentTime;
    }
}

void RSReclaimMemoryManager::InterruptReclaimTask(const std::string& sceneId)
{
    // When the user operates in the launcher, interrupt the reclaim task.
    if (!isReclaimInterrupt_.load() && sceneId != EVENT_ENTER_RECENTS) {
        isReclaimInterrupt_.store(true);
    }
}

} // namespace OHOS::Rosen