• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
#include <sys/wait.h>
#include <sys/prctl.h>

#include <memory>

#include "common_interfaces/profiler/heap_profiler_listener.h"
#include "ecmascript/dfx/hprof/heap_profiler.h"

#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/dfx/hprof/heap_snapshot.h"
#include "ecmascript/dfx/hprof/rawheap_dump.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/dfx/hprof/heap_root_visitor.h"
#include "ecmascript/mem/object_xray.h"
#include "ecmascript/platform/backtrace.h"
#include "ecmascript/platform/file.h"

#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "faultloggerd_client.h"
#endif
34 
35 namespace panda::ecmascript {
36 
FindId(JSTaggedType addr)37 std::pair<bool, NodeId> EntryIdMap::FindId(JSTaggedType addr)
38 {
39     auto it = idMap_.find(addr);
40     if (it == idMap_.end()) {
41         return std::make_pair(false, GetNextId()); // return nextId if entry not exits
42     } else {
43         return std::make_pair(true, it->second);
44     }
45 }
46 
FindOrInsertNodeId(JSTaggedType addr)47 NodeId EntryIdMap::FindOrInsertNodeId(JSTaggedType addr)
48 {
49     auto it = idMap_.find(addr);
50     if (it != idMap_.end()) {
51         return it->second;
52     }
53     NodeId id = GetNextId();
54     idMap_.emplace(addr, id);
55     return id;
56 }
57 
InsertId(JSTaggedType addr,NodeId id)58 bool EntryIdMap::InsertId(JSTaggedType addr, NodeId id)
59 {
60     auto [iter, inserted] = idMap_.insert_or_assign(addr, id);
61     return inserted;
62 }
63 
EraseId(JSTaggedType addr)64 bool EntryIdMap::EraseId(JSTaggedType addr)
65 {
66     return idMap_.erase(addr);
67 }
68 
Move(JSTaggedType oldAddr,JSTaggedType forwardAddr)69 bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
70 {
71     if (oldAddr == forwardAddr) {
72         return true;
73     }
74     auto it = idMap_.find(oldAddr);
75     if (it != idMap_.end()) {
76         NodeId id = it->second;
77         idMap_.erase(it);
78         idMap_.insert_or_assign(forwardAddr, id);
79         return true;
80     }
81     return false;
82 }
83 
// Shrinks the addr -> NodeId map to the objects still present in |snapshot|:
// every heap-object address found in the snapshot's node list is marked,
// then all unmarked heap-object entries are dropped.
void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
{
    LOG_ECMA(INFO) << "EntryIdMap::UpdateEntryIdMap";
    if (snapshot == nullptr) {
        LOG_ECMA(FATAL) << "EntryIdMap::UpdateEntryIdMap:snapshot is nullptr";
        UNREACHABLE();
    }

    HeapMarker marker {};
    auto nodes = snapshot->GetNodes();
    for (auto node : *nodes) {
        auto addr = node->GetAddress();
        // Only real heap objects participate in the mark phase; non-heap
        // addresses are left untouched by RemoveUnmarkedObjects as well.
        if (JSTaggedValue{addr}.IsHeapObject()) {
            marker.Mark(addr);
        }
    }
    RemoveUnmarkedObjects(marker);
}
102 
RemoveUnmarkedObjects(HeapMarker & marker)103 void EntryIdMap::RemoveUnmarkedObjects(HeapMarker &marker)
104 {
105     for (auto it = idMap_.begin(); it != idMap_.end();) {
106         JSTaggedType addr = it->first;
107         if (JSTaggedValue{addr}.IsHeapObject() && !marker.IsMarked(addr)) {
108             it = idMap_.erase(it);
109         } else {
110             ++it;
111         }
112     }
113 }
114 
// Creates the per-VM profiler: allocates the id map from the profiler chunk
// and, when CMC GC is enabled, subscribes to the common runtime's object-move
// notifications so entry ids can follow objects the GC relocates.
HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
{
    isProfiling_ = false;
    entryIdMap_ = GetChunk()->New<EntryIdMap>();
    if (g_isEnableCMCGC) {
        // MoveEvent acquires mutex_ itself, so the callback is safe to invoke
        // from whichever thread the listener dispatches on.
        moveEventCbId_ = common::HeapProfilerListener::GetInstance().RegisterMoveEventCb(
            [this](uintptr_t fromObj, uintptr_t toObj, size_t size) {
                this->MoveEvent(fromObj, reinterpret_cast<TaggedObject *>(toObj), size);
            });
    }
}
126 
// Tears down profiler state: clears cached names, frees retained snapshots
// and the id map, and (mirroring the constructor) unregisters the CMC GC
// move-event callback so no notification can reach a destroyed profiler.
HeapProfiler::~HeapProfiler()
{
    JSPandaFileManager::GetInstance()->ClearNameMap();
    ClearSnapshot();
    GetChunk()->Delete(entryIdMap_);
    if (g_isEnableCMCGC) {
        common::HeapProfilerListener::GetInstance().UnRegisterMoveEventCb(moveEventCbId_);
    }
}
136 
// Forwards a new-object allocation to the live tracker while profiling is
// active. The lock is taken before the GC guard so the check of isProfiling_
// and the tracker call are one atomic step with respect to MoveEvent.
void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
{
    LockHolder lock(mutex_);
    DISALLOW_GARBAGE_COLLECTION;
    if (isProfiling_) {
        // Id will be allocated later while add new node
        if (heapTracker_ != nullptr) {
            heapTracker_->AllocationEvent(address, size);
        }
    }
}
148 
// Keeps profiler state consistent when the GC moves an object: the id map is
// re-keyed to the forwarded address first, then the live tracker (if any)
// is told about the move.
void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
{
    LockHolder lock(mutex_);
    if (isProfiling_) {
        entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
        if (heapTracker_ != nullptr) {
            heapTracker_->MoveEvent(address, forwardAddress, size);
        }
    }
}
159 
// Refreshes the snapshot's node set. Concurrent shared-heap sweeping must be
// finished first so the node walk does not see objects being reclaimed.
void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
{
    SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
    snapshot->UpdateNodes();
}
165 
// Writes a heap snapshot into the fault logger when an OOM occurs.
// ARM32 targets always emit a simplified JSON snapshot to bound memory use;
// other targets emit a binary rawheap file for BINARY-format OOM dumps and
// a JSON snapshot otherwise. No-op unless ENABLE_DUMP_IN_FAULTLOG is set.
void HeapProfiler::DumpHeapSnapshotForOOM([[maybe_unused]] const DumpSnapShotOption &dumpOption,
                                          [[maybe_unused]] bool fromSharedGC)
{
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    // Write in faultlog for heap leak.
    int32_t fd;
#if defined(PANDA_TARGET_ARM32)
    // 32-bit path: override the requested options with a simplified JSON dump.
    DumpSnapShotOption doDumpOption;
    doDumpOption.dumpFormat = DumpFormat::JSON;
    doDumpOption.isFullGC = dumpOption.isFullGC;
    doDumpOption.isSimplify = true;
    doDumpOption.isBeforeFill = false;
    fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd" << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    DumpHeapSnapshot(&stream, doDumpOption);
#else
    // Choose the faultlogger slot matching the requested dump format.
    if (dumpOption.isDumpOOM && dumpOption.dumpFormat == DumpFormat::BINARY) {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_RAW_SNAPSHOT));
    } else {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    }
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd" << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    if (!fromSharedGC) {
        DumpHeapSnapshot(&stream, dumpOption);
    } else {
        // Invoked from inside a shared GC: take the already-stopped-world path.
        DumpHeapSnapshotFromSharedGC(&stream, dumpOption);
    }
#endif
#endif
}
204 
// Binary-dumps the heap while being invoked from a shared GC. Prepares the
// local heap, the shared heap, and every mutator's TLAB so object iteration
// is consistent; the ASSERT enforces that mutator threads are not running.
void HeapProfiler::DumpHeapSnapshotFromSharedGC(Stream *stream, const DumpSnapShotOption &dumpOption)
{
    base::BlockHookScope blockScope;
    const_cast<Heap*>(vm_->GetHeap())->Prepare();
    SharedHeap::GetInstance()->Prepare(true);
    Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
    });
    BinaryDump(stream, dumpOption);
    stream->EndOfStream();
}
217 
DoDump(Stream * stream,Progress * progress,const DumpSnapShotOption & dumpOption)218 bool HeapProfiler::DoDump(Stream *stream, Progress *progress, const DumpSnapShotOption &dumpOption)
219 {
220     DISALLOW_GARBAGE_COLLECTION;
221     int32_t heapCount = 0;
222     size_t heapSize = 0;
223     HeapSnapshot *snapshot = nullptr;
224     {
225         if (dumpOption.isFullGC) {
226             if (g_isEnableCMCGC) {
227                 heapSize = common::Heap::GetHeap().GetSurvivedSize();
228                 heapCount = static_cast<int32_t>(common::Heap::GetHeap().GetAllocatedSize());
229             } else {
230                 heapSize = vm_->GetHeap()->GetLiveObjectSize();
231                 heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
232             }
233             LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
234             LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap count " << heapCount;
235             if (progress != nullptr) {
236                 progress->ReportProgress(0, heapCount);
237             }
238         }
239         snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, dumpOption);
240         ASSERT(snapshot != nullptr);
241     }
242     // In async mode, EntryIdMap is filled and updated in parent-process,
243     // so EntryIdMap needs to be updated only in sync mode.
244     if (dumpOption.isSync) {
245         entryIdMap_->UpdateEntryIdMap(snapshot);
246     }
247     isProfiling_ = true;
248     if (progress != nullptr) {
249         progress->ReportProgress(heapCount, heapCount);
250     }
251     if (!stream->Good()) {
252         FileStream newStream(GenDumpFileName(dumpOption.dumpFormat));
253         auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
254         GetChunk()->Delete(snapshot);
255         return serializerResult;
256     }
257     auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
258     GetChunk()->Delete(snapshot);
259     return serializerResult;
260 }
261 
// Polls (non-blocking waitpid) for the forked dump child |pid| and reports
// the outcome through |callback|:
//   FAILED_TO_WAIT            - waitpid() itself failed;
//   SUCCESS                   - the child exited;
//   WAIT_FORK_PROCESS_TIMEOUT - no exit within DUMP_TIME_OUT seconds, in
//                               which case the child is sent SIGTERM.
[[maybe_unused]]static void WaitProcess(pid_t pid, const std::function<void(uint8_t)> &callback)
{
    time_t startTime = time(nullptr);
    constexpr int DUMP_TIME_OUT = 300;         // seconds before giving up on the child
    constexpr int DEFAULT_SLEEP_TIME = 100000; // poll period, microseconds
    while (true) {
        int status = 0;
        pid_t p = waitpid(pid, &status, WNOHANG);
        if (p < 0) {
            LOG_GC(ERROR) << "DumpHeapSnapshot wait failed ";
            if (callback) {
                callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::FAILED_TO_WAIT));
            }
            return;
        }
        if (p == pid) {
            if (callback) {
                callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::SUCCESS));
            }
            return;
        }
        if (time(nullptr) > startTime + DUMP_TIME_OUT) {
            LOG_GC(ERROR) << "DumpHeapSnapshot kill thread, wait " << DUMP_TIME_OUT << " s";
            // NOTE(review): the child is signalled but not reaped here, so it
            // may remain a zombie until the parent exits — confirm intended.
            kill(pid, SIGTERM);
            if (callback) {
                callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::WAIT_FORK_PROCESS_TIMEOUT));
            }
            return;
        }
        usleep(DEFAULT_SLEEP_TIME);
    }
}
294 
BinaryDump(Stream * stream,const DumpSnapShotOption & dumpOption)295 bool HeapProfiler::BinaryDump(Stream *stream, const DumpSnapShotOption &dumpOption)
296 {
297     [[maybe_unused]] EcmaHandleScope ecmaHandleScope(vm_->GetAssociatedJSThread());
298     DumpSnapShotOption option;
299     std::vector<std::string> filePaths;
300     std::vector<uint64_t> fileSizes;
301     auto stringTable = chunk_.New<StringHashMap>(vm_);
302     auto snapshot = chunk_.New<HeapSnapshot>(vm_, stringTable, option, false, entryIdMap_);
303 
304     if (const_cast<EcmaVM *>(vm_)->GetJSOptions().EnableRawHeapCrop()) {
305         Runtime::GetInstance()->SetRawHeapDumpCropLevel(RawHeapDumpCropLevel::LEVEL_V2);
306     }
307 
308     RawHeapDump *rawHeapDump = nullptr;
309     RawHeapDumpCropLevel cropLevel = Runtime::GetInstance()->GetRawHeapDumpCropLevel();
310     switch (cropLevel) {
311         case RawHeapDumpCropLevel::LEVEL_V1:
312             rawHeapDump = new RawHeapDumpV1(vm_, stream, snapshot, entryIdMap_, dumpOption);
313             break;
314         case RawHeapDumpCropLevel::LEVEL_V2:
315             rawHeapDump = new RawHeapDumpV2(vm_, stream, snapshot, entryIdMap_, dumpOption);
316             break;
317         default:
318             LOG_ECMA(FATAL) << "rawheap dump, do not supported crop level " << static_cast<int>(cropLevel);
319             UNREACHABLE();
320             break;
321     }
322 
323     rawHeapDump->BinaryDump();
324     filePaths.emplace_back(RAWHEAP_FILE_NAME);
325     fileSizes.emplace_back(rawHeapDump->GetRawHeapFileOffset());
326     vm_->GetEcmaGCKeyStats()->SendSysEventDataSize(filePaths, fileSizes);
327     chunk_.Delete<StringHashMap>(stringTable);
328     chunk_.Delete<HeapSnapshot>(snapshot);
329     delete rawHeapDump;
330     return true;
331 }
332 
FillIdMap()333 void HeapProfiler::FillIdMap()
334 {
335     HeapMarker marker {};
336 
337     // Iterate SharedHeap Object
338     SharedHeap* sHeap = SharedHeap::GetInstance();
339     if (sHeap != nullptr) {
340         sHeap->IterateOverObjects([this, &marker](TaggedObject *obj) {
341             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
342             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
343             entryIdMap_->InsertId(addr, sequenceId);
344             marker.Mark(addr);
345         });
346         sHeap->GetReadOnlySpace()->IterateOverObjects([this, &marker](TaggedObject *obj) {
347             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
348             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
349             entryIdMap_->InsertId(addr, sequenceId);
350             marker.Mark(addr);
351         });
352     }
353 
354     // Iterate LocalHeap Object
355     auto heap = vm_->GetHeap();
356     if (heap != nullptr) {
357         heap->IterateOverObjects([this, &marker](TaggedObject *obj) {
358             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
359             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
360             entryIdMap_->InsertId(addr, sequenceId);
361             marker.Mark(addr);
362         });
363     }
364 
365     entryIdMap_->RemoveUnmarkedObjects(marker);
366 }
367 
// Entry point for all heap snapshot dumps.
// Flow: (optional) full GC -> suspend all JS threads -> make heaps iterable
// -> dispatch to one of three modes:
//   * OOM dump:   binary dump in-process, then return;
//   * sync dump:  binary/JSON dump in-process (IDE path);
//   * async dump: fork(); the child serializes from its copy-on-write heap
//     image while a detached watcher thread in the parent reports the
//     child's status through |callback| (hidumper path).
bool HeapProfiler::DumpHeapSnapshot(Stream *stream, const DumpSnapShotOption &dumpOption, Progress *progress,
                                    std::function<void(uint8_t)> callback)
{
    bool res = false;
    base::BlockHookScope blockScope;
    ThreadManagedScope managedScope(vm_->GetAssociatedJSThread());
    pid_t pid = -1;
    {
        if (dumpOption.isFullGC) {
            if (g_isEnableCMCGC) {
                common::BaseRuntime::RequestGC(common::GC_REASON_BACKUP, false, common::GC_TYPE_FULL);
            } else {
                [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
                ForceSharedGC();
                ASSERT(heapClean);
            }
        }
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread()); // suspend All.
        if (g_isEnableCMCGC) {
            common::Heap::GetHeap().WaitForGCFinish();
        } else {
            // Make every thread's heap iterable before any walk below.
            const_cast<Heap*>(vm_->GetHeap())->Prepare();
            SharedHeap::GetInstance()->Prepare(true);
            Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
                ASSERT(!thread->IsInRunningState());
                const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
            });
        }
        // OOM and ThresholdReachedDump.
        if (dumpOption.isDumpOOM) {
            res = BinaryDump(stream, dumpOption);
            stream->EndOfStream();
            return res;
        }
        // ide.
        if (dumpOption.isSync) {
            if (dumpOption.dumpFormat == DumpFormat::BINARY) {
                return BinaryDump(stream, dumpOption);
            } else {
                return DoDump(stream, progress, dumpOption);
            }
        }
        AppFreezeFilterCallback appfreezeCallback = Runtime::GetInstance()->GetAppFreezeFilterCallback();
        std::string unused;
        if (appfreezeCallback != nullptr && !appfreezeCallback(getpid(), false, unused)) {
            LOG_ECMA(ERROR) << "failed to set appfreeze filter";
            return false;
        }
        // hidumper do fork and fillmap.
        if (dumpOption.isBeforeFill) {
            FillIdMap();
        }
        // fork
        if ((pid = fork()) < 0) {
            LOG_ECMA(ERROR) << "DumpHeapSnapshot fork failed: " << strerror(errno);
            if (callback) {
                callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::FORK_FAILED));
            }
            return false;
        }
        if (pid == 0) {
            // Child process: serialize from the frozen fork image so the
            // parent can resume its mutators without waiting for the dump.
            vm_->GetAssociatedJSThread()->EnableCrossThreadExecution();
            prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("dump_process"), 0, 0, 0);
            if (dumpOption.dumpFormat == DumpFormat::BINARY) {
                res = BinaryDump(stream, dumpOption);
                stream->EndOfStream();
            } else {
                res = DoDump(stream, progress, dumpOption);
            }
            _exit(0);
        }
    }
    if (pid != 0) {
        // Parent: watch the child from a detached thread; WaitProcess invokes
        // |callback| with the final DumpHeapSnapshotStatus.
        std::thread thread(&WaitProcess, pid, callback);
        thread.detach();
    }
    isProfiling_ = true;
    return res;
}
447 
// Starts real-time heap tracking: collect garbage first so the baseline
// snapshot holds only live objects, then build a REAL_TIME snapshot and
// attach a HeapTracker sampling it at |timeInterval| intervals.
// Returns false when the snapshot could not be created.
bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
                                     bool traceAllocation, bool newThread)
{
    if (g_isEnableCMCGC) {
        common::BaseRuntime::RequestGC(common::GC_REASON_BACKUP, false, common::GC_TYPE_FULL);
    } else {
        vm_->CollectGarbage(TriggerGCType::OLD_GC);
        ForceSharedGC();
    }
    SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
    DumpSnapShotOption dumpOption;
    dumpOption.isVmMode = isVmMode;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, dumpOption, traceAllocation);
    if (snapshot == nullptr) {
        return false;
    }
    // Must be set before UpdateHeapObjects so subsequent events are recorded.
    isProfiling_ = true;
    UpdateHeapObjects(snapshot);
    heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
    const_cast<EcmaVM *>(vm_)->StartHeapTracking();
    if (newThread) {
        // Let the tracker drive sampling on its own thread; otherwise the
        // embedder drives it by calling UpdateHeapTracking().
        heapTracker_->StartTracing();
    }

    return true;
}
476 
UpdateHeapTracking(Stream * stream)477 bool HeapProfiler::UpdateHeapTracking(Stream *stream)
478 {
479     if (heapTracker_ == nullptr) {
480         return false;
481     }
482     HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
483     if (snapshot == nullptr) {
484         return false;
485     }
486 
487     {
488         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
489         UpdateHeapObjects(snapshot);
490         snapshot->RecordSampleTime();
491     }
492 
493     if (stream != nullptr) {
494         snapshot->PushHeapStat(stream);
495     }
496 
497     return true;
498 }
499 
// Stops real-time heap tracking: halts the tracker, finalizes its snapshot
// under a stop-the-world pause (after a final GC), and serializes the result
// to |stream| as JSON. Returns false when tracking was not active.
bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());

    const_cast<EcmaVM *>(vm_)->StopHeapTracking();
    if (newThread) {
        // Mirrors StartHeapTracking: only stop the tracing thread if one
        // was started.
        heapTracker_->StopTracing();
    }

    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    if (progress != nullptr) {
        progress->ReportProgress(0, heapCount);
    }
    {
        if (g_isEnableCMCGC) {
            common::BaseRuntime::RequestGC(common::GC_REASON_BACKUP, false, common::GC_TYPE_FULL);
            SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
            snapshot->FinishSnapshot();
        } else {
            // GC and wait for sweeping so FinishSnapshot sees a stable heap.
            ForceSharedGC();
            SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
            SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
            snapshot->FinishSnapshot();
        }
    }

    isProfiling_ = false;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
}
539 
GenDumpFileName(DumpFormat dumpFormat)540 std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
541 {
542     CString filename("hprof_");
543     switch (dumpFormat) {
544         case DumpFormat::JSON:
545             filename.append(GetTimeStamp());
546             break;
547         case DumpFormat::BINARY:
548             filename.append("unimplemented");
549             break;
550         case DumpFormat::OTHER:
551             filename.append("unimplemented");
552             break;
553         default:
554             filename.append("unimplemented");
555             break;
556     }
557     filename.append(".heapsnapshot");
558     return ConvertToStdString(filename);
559 }
560 
GetTimeStamp()561 CString HeapProfiler::GetTimeStamp()
562 {
563     std::time_t timeSource = std::time(nullptr);
564     struct tm tm {
565     };
566     struct tm *timeData = localtime_r(&timeSource, &tm);
567     if (timeData == nullptr) {
568         LOG_FULL(FATAL) << "localtime_r failed";
569         UNREACHABLE();
570     }
571     CString stamp;
572     const int TIME_START = 1900;
573     stamp.append(ToCString(timeData->tm_year + TIME_START))
574         .append("-")
575         .append(ToCString(timeData->tm_mon + 1))
576         .append("-")
577         .append(ToCString(timeData->tm_mday))
578         .append("_")
579         .append(ToCString(timeData->tm_hour))
580         .append("-")
581         .append(ToCString(timeData->tm_min))
582         .append("-")
583         .append(ToCString(timeData->tm_sec));
584     return stamp;
585 }
586 
ForceFullGC(const EcmaVM * vm)587 bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
588 {
589     if (vm->IsInitialized()) {
590         const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
591         return true;
592     }
593     return false;
594 }
595 
// Triggers a shared-heap GC and waits for its concurrent sweeping to finish,
// so shared objects are stable before a subsequent snapshot walk.
void HeapProfiler::ForceSharedGC()
{
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(vm_->GetAssociatedJSThread());
    sHeap->GetSweeper()->WaitAllTaskFinished();
}
602 
MakeHeapSnapshot(SampleType sampleType,const DumpSnapShotOption & dumpOption,bool traceAllocation)603 HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, const DumpSnapShotOption &dumpOption,
604                                              bool traceAllocation)
605 {
606     LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
607     if (dumpOption.isFullGC) {
608         DISALLOW_GARBAGE_COLLECTION;
609         const_cast<Heap *>(vm_->GetHeap())->Prepare();
610     }
611     switch (sampleType) {
612         case SampleType::ONE_SHOT: {
613             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
614                                                            traceAllocation, entryIdMap_);
615             if (snapshot == nullptr) {
616                 LOG_FULL(FATAL) << "alloc snapshot failed";
617                 UNREACHABLE();
618             }
619             snapshot->BuildUp(dumpOption.isSimplify);
620             return snapshot;
621         }
622         case SampleType::REAL_TIME: {
623             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
624                                                            traceAllocation, entryIdMap_);
625             if (snapshot == nullptr) {
626                 LOG_FULL(FATAL) << "alloc snapshot failed";
627                 UNREACHABLE();
628             }
629             AddSnapshot(snapshot);
630             snapshot->PrepareSnapshot();
631             return snapshot;
632         }
633         default:
634             return nullptr;
635     }
636 }
637 
AddSnapshot(HeapSnapshot * snapshot)638 void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
639 {
640     if (hprofs_.size() >= MAX_NUM_HPROF) {
641         ClearSnapshot();
642     }
643     ASSERT(snapshot != nullptr);
644     hprofs_.emplace_back(snapshot);
645 }
646 
ClearSnapshot()647 void HeapProfiler::ClearSnapshot()
648 {
649     for (auto *snapshot : hprofs_) {
650         GetChunk()->Delete(snapshot);
651     }
652     hprofs_.clear();
653 }
654 
StartHeapSampling(uint64_t samplingInterval,int stackDepth)655 bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
656 {
657     if (heapSampling_.get()) {
658         LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
659         return false;
660     }
661     heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
662                                                    samplingInterval, stackDepth);
663     return true;
664 }
665 
StopHeapSampling()666 void HeapProfiler::StopHeapSampling()
667 {
668     heapSampling_.reset();
669 }
670 
GetAllocationProfile()671 const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
672 {
673     if (!heapSampling_.get()) {
674         LOG_ECMA(ERROR) << "Heap sampling was not started, please start firstly.";
675         return nullptr;
676     }
677     return heapSampling_->GetAllocationProfile();
678 }
679 
// Whether local-handle leak detection is currently enabled.
bool HeapProfiler::IsStartLocalHandleLeakDetect() const
{
    return startLocalHandleLeakDetect_;
}
684 
// Toggles the local-handle leak detection flag (on <-> off).
void HeapProfiler::SwitchStartLocalHandleLeakDetect()
{
    startLocalHandleLeakDetect_ = !startLocalHandleLeakDetect_;
}
689 
// Increments the active-scope nesting depth used by leak detection.
void HeapProfiler::IncreaseScopeCount()
{
    ++scopeCount_;
}
694 
// Decrements the active-scope nesting depth.
// NOTE(review): scopeCount_ is unsigned and there is no underflow guard —
// callers are expected to pair this with IncreaseScopeCount(); confirm.
void HeapProfiler::DecreaseScopeCount()
{
    --scopeCount_;
}
699 
// Current active-scope nesting depth (see StorePotentiallyLeakHandles).
uint32_t HeapProfiler::GetScopeCount() const
{
    return scopeCount_;
}
704 
// Records a newly entered LocalScope/EcmaHandleScope pair on the
// active-scope stack for leak-detection bookkeeping.
void HeapProfiler::PushToActiveScopeStack(LocalScope *localScope, EcmaHandleScope *ecmaHandleScope)
{
    activeScopeStack_.emplace(std::make_shared<ScopeWrapper>(localScope, ecmaHandleScope));
}
709 
PopFromActiveScopeStack()710 void HeapProfiler::PopFromActiveScopeStack()
711 {
712     if (!activeScopeStack_.empty()) {
713         activeScopeStack_.pop();
714     }
715 }
716 
GetLastActiveScope() const717 std::shared_ptr<ScopeWrapper> HeapProfiler::GetLastActiveScope() const
718 {
719     if (!activeScopeStack_.empty()) {
720         return activeScopeStack_.top();
721     }
722     return nullptr;
723 }
724 
// Drops all recorded handle backtraces. Any std::string_view previously
// returned by GetBackTraceOfHandle is invalidated by this call.
void HeapProfiler::ClearHandleBackTrace()
{
    handleBackTrace_.clear();
}
729 
GetBackTraceOfHandle(const uintptr_t handle) const730 std::string_view HeapProfiler::GetBackTraceOfHandle(const uintptr_t handle) const
731 {
732     const auto it = handleBackTrace_.find(handle);
733     if (it != handleBackTrace_.end()) {
734         return std::string_view(it->second);
735     }
736     return "";
737 }
738 
InsertHandleBackTrace(uintptr_t handle,const std::string & backTrace)739 bool HeapProfiler::InsertHandleBackTrace(uintptr_t handle, const std::string &backTrace)
740 {
741     auto [iter, inserted] = handleBackTrace_.insert_or_assign(handle, backTrace);
742     return inserted;
743 }
744 
// Flushes |buffer| (newline-terminated) to the leak stack trace fd and
// clears the buffer for reuse. No-op when no fd has been set.
void HeapProfiler::WriteToLeakStackTraceFd(std::ostringstream &buffer) const
{
    if (leakStackTraceFd_ < 0) {
        return;
    }
    buffer << std::endl;
    DPrintf(reinterpret_cast<fd_t>(leakStackTraceFd_), buffer.str());
    buffer.str("");
}
754 
// Adopts |fd| as the leak stack trace output descriptor, exchanging its
// fd-sanitizer owner tag so fdsan attributes the fd to this component.
void HeapProfiler::SetLeakStackTraceFd(const int32_t fd)
{
    FdsanExchangeOwnerTag(reinterpret_cast<fd_t>(fd));
    leakStackTraceFd_ = fd;
}
760 
// Current leak stack trace fd, or -1 when none is open.
int32_t HeapProfiler::GetLeakStackTraceFd() const
{
    return leakStackTraceFd_;
}
765 
// Syncs and closes the leak stack trace fd if one is open, then resets the
// handle to the -1 sentinel. Safe to call repeatedly.
void HeapProfiler::CloseLeakStackTraceFd()
{
    if (leakStackTraceFd_ != -1) {
        FSync(reinterpret_cast<fd_t>(leakStackTraceFd_));
        Close(reinterpret_cast<fd_t>(leakStackTraceFd_));
        leakStackTraceFd_ = -1;
    }
}
774 
// Captures a native backtrace for |handle| when it looks like a potential
// local-handle leak: either the scope nesting depth is at most 1 (handle
// created outside nested scopes) or the innermost active scope has been
// alive for at least LOCAL_HANDLE_LEAK_TIME_MS.
void HeapProfiler::StorePotentiallyLeakHandles(const uintptr_t handle)
{
    bool isDetectedByScopeCount { GetScopeCount() <= 1 };
    bool isDetectedByScopeTime { false };
    if (auto lastScope = GetLastActiveScope()) {
        auto timeSinceLastScopeCreate = lastScope->clockScope_.TotalSpentTime();
        isDetectedByScopeTime = timeSinceLastScopeCreate >= LOCAL_HANDLE_LEAK_TIME_MS;
    }
    if (isDetectedByScopeCount || isDetectedByScopeTime) {
        // Backtrace capture is relatively expensive, so it only runs once a
        // heuristic above has fired.
        std::ostringstream stack;
        Backtrace(stack, true);
        InsertHandleBackTrace(handle, stack.str());
    }
}
789 }  // namespace panda::ecmascript
790