1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <atomic>
16 #include <chrono>
17 #include <fcntl.h>
18 #include <sys/wait.h>
19 #include <sys/prctl.h>
20 #include <sys/stat.h>
21 #include <sys/syscall.h>
22 #include <thread>
23 #include <unistd.h>
24 #include "ecmascript/dfx/hprof/heap_profiler.h"
25 
26 #include "ecmascript/checkpoint/thread_state_transition.h"
27 #include "ecmascript/dfx/hprof/heap_snapshot.h"
28 #include "ecmascript/jspandafile/js_pandafile_manager.h"
29 #include "ecmascript/mem/heap-inl.h"
30 #include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
31 #include "ecmascript/base/block_hook_scope.h"
32 #include "ecmascript/dfx/hprof/heap_root_visitor.h"
33 #include "ecmascript/mem/object_xray.h"
34 
35 #if defined(ENABLE_DUMP_IN_FAULTLOG)
36 #include "faultloggerd_client.h"
37 #endif
38 
39 namespace panda::ecmascript {
40 static pid_t ForkBySyscall(void)
41 {
42 #ifdef SYS_fork
43     return syscall(SYS_fork);
44 #else
45     return syscall(SYS_clone, SIGCHLD, 0);
46 #endif
47 }
48 
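// EntryIdMap keeps a stable node id per object address across snapshots: FindId() returns the existing id
// (or the next free id without inserting it), InsertId()/EraseId() maintain the table, and Move() re-keys an
// entry when the GC relocates an object.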
49 std::pair<bool, NodeId> EntryIdMap::FindId(JSTaggedType addr)
50 {
51     auto it = idMap_.find(addr);
52     if (it == idMap_.end()) {
53         return std::make_pair(false, GetNextId()); // return nextId if entry does not exist
54     } else {
55         return std::make_pair(true, it->second);
56     }
57 }
58 
59 bool EntryIdMap::InsertId(JSTaggedType addr, NodeId id)
60 {
61     auto it = idMap_.find(addr);
62     if (it == idMap_.end()) {
63         idMap_.emplace(addr, id);
64         return true;
65     }
66     idMap_[addr] = id;
67     return false;
68 }
69 
70 bool EntryIdMap::EraseId(JSTaggedType addr)
71 {
72     auto it = idMap_.find(addr);
73     if (it == idMap_.end()) {
74         return false;
75     }
76     idMap_.erase(it);
77     return true;
78 }
79 
80 bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
81 {
82     if (oldAddr == forwardAddr) {
83         return true;
84     }
85     auto it = idMap_.find(oldAddr);
86     if (it != idMap_.end()) {
87         NodeId id = it->second;
88         idMap_.erase(it);
89         idMap_[forwardAddr] = id;
90         return true;
91     }
92     return false;
93 }
94 
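// Drops ids for addresses that no longer appear in the snapshot's node list, keeping ids of live nodes only.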
95 void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
96 {
97     LOG_ECMA(INFO) << "EntryIdMap::UpdateEntryIdMap";
98     if (snapshot == nullptr) {
99         LOG_ECMA(FATAL) << "EntryIdMap::UpdateEntryIdMap:snapshot is nullptr";
100         UNREACHABLE();
101     }
102     auto nodes = snapshot->GetNodes();
103     CUnorderedMap<JSTaggedType, NodeId> newIdMap;
104     for (auto node : *nodes) {
105         auto addr = node->GetAddress();
106         auto it = idMap_.find(addr);
107         if (it != idMap_.end()) {
108             newIdMap.emplace(addr, it->second);
109         }
110     }
111     idMap_.clear();
112     idMap_ = newIdMap;
113 }
114 
115 HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
116 {
117     isProfiling_ = false;
118     entryIdMap_ = GetChunk()->New<EntryIdMap>();
119 }
120 
121 HeapProfiler::~HeapProfiler()
122 {
123     JSPandaFileManager::GetInstance()->ClearNameMap();
124     ClearSnapshot();
125     GetChunk()->Delete(entryIdMap_);
126 }
127 
128 void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
129 {
130     DISALLOW_GARBAGE_COLLECTION;
131     if (isProfiling_) {
132         // Id will be allocated later when adding a new node
133         if (heapTracker_ != nullptr) {
134             heapTracker_->AllocationEvent(address, size);
135         }
136     }
137 }
138 
139 void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
140 {
141     LockHolder lock(mutex_);
142     if (isProfiling_) {
143         entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
144         if (heapTracker_ != nullptr) {
145             heapTracker_->MoveEvent(address, forwardAddress, size);
146         }
147     }
148 }
149 
150 void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
151 {
152     SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
153     snapshot->UpdateNodes();
154 }
155 
156 void HeapProfiler::DumpHeapSnapshot([[maybe_unused]] const DumpSnapShotOption &dumpOption)
157 {
158 #if defined(ENABLE_DUMP_IN_FAULTLOG)
159     // Write in faultlog for heap leak.
160     int32_t fd;
161     if (dumpOption.isDumpOOM && dumpOption.dumpFormat == DumpFormat::BINARY) {
162         fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_RAW_SNAPSHOT));
163     } else {
164         fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
165     }
166     if (fd < 0) {
167         LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd=" << fd;
168         return;
169     }
170     FileDescriptorStream stream(fd);
171     DumpHeapSnapshot(&stream, dumpOption);
172 #endif
173 }
174 
175 bool HeapProfiler::DoDump(Stream *stream, Progress *progress, const DumpSnapShotOption &dumpOption)
176 {
177     int32_t heapCount = 0;
178     HeapSnapshot *snapshot = nullptr;
179     {
180         if (dumpOption.isFullGC) {
181             size_t heapSize = vm_->GetHeap()->GetLiveObjectSize();
182             LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
183             heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
184             if (progress != nullptr) {
185                 progress->ReportProgress(0, heapCount);
186             }
187         }
188         snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, dumpOption);
189         ASSERT(snapshot != nullptr);
190     }
191     entryIdMap_->UpdateEntryIdMap(snapshot);
192     isProfiling_ = true;
193     if (progress != nullptr) {
194         progress->ReportProgress(heapCount, heapCount);
195     }
196     if (!stream->Good()) {
197         FileStream newStream(GenDumpFileName(dumpOption.dumpFormat));
198         auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
199         GetChunk()->Delete(snapshot);
200         return serializerResult;
201     }
202     auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
203     GetChunk()->Delete(snapshot);
204     return serializerResult;
205 }
206 
207 static uint64_t CheckAndRemoveWeak(JSTaggedValue &value, uint64_t originalAddr)
208 {
209     if (!value.IsWeak()) {
210         return originalAddr;
211     }
212     JSTaggedValue weakValue(originalAddr);
213     weakValue.RemoveWeakTag();
214     return weakValue.GetRawData();
215 }
216 
217 static uint64_t CheckAndAddWeak(JSTaggedValue &value, uint64_t originalAddr)
218 {
219     if (!value.IsWeak()) {
220         return originalAddr;
221     }
222     JSTaggedValue weakValue(originalAddr);
223     weakValue.CreateWeakRef();
224     return weakValue.GetRawData();
225 }
226 
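// Rewrites one member slot of a decoded object: looks up the member's old address in objMap, retargets the
// slot to the re-allocated copy (preserving the weak tag), and records unresolved addresses in notFoundObj.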
227 static uint64_t VisitMember(ObjectSlot &slot, uint64_t objAddr, CUnorderedSet<uint64_t> &notFoundObj,
228                             JSHClass *jsHclass, CUnorderedMap<uint64_t, NewAddr *> &objMap)
229 {
230     auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
231     JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
232     auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
233     originalAddr = CheckAndRemoveWeak(value, originalAddr);
234     if (!value.IsHeapObject() || originalAddr == 0) {
235         return 0LL;
236     }
237     auto toItemInfo = objMap.find(originalAddr);
238     if (toItemInfo == objMap.end()) {
239         LOG_ECMA(ERROR) << "ark raw heap decode visit " << std::hex << objAddr << ", type="
240                         << JSHClass::DumpJSType(jsHclass->GetObjectType())
241                         << ", not found member old addr=" << originalAddr;
242         notFoundObj.insert(reinterpret_cast<uint64_t>(*taggedPointerAddr));
243         return 0LL;
244     }
245     auto newAddr = reinterpret_cast<uint64_t>(toItemInfo->second->Data());
246     newAddr = CheckAndAddWeak(value, newAddr);
247     slot.Update(reinterpret_cast<TaggedObject *>(newAddr));
248     return newAddr;
249 }
250 
251 CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> VisitObj(CUnorderedMap<uint64_t, NewAddr *> &objMap)
252 {
253     CUnorderedSet<uint64_t> notFoundObj;
254     CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> refSetMap; // old addr map to ref set
255     auto visitor = [&notFoundObj, &objMap, &refSetMap] (TaggedObject *root, ObjectSlot start,
256                                                         ObjectSlot end, VisitObjectArea area) {
257         if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
258             return;
259         }
260         auto jsHclass = root->GetClass();
261         auto objAddr = reinterpret_cast<uint64_t>(root);
262         CUnorderedSet<uint64_t> *refSet = nullptr;
263         if (refSetMap.find(objAddr) != refSetMap.end()) {
264             refSet = &refSetMap[objAddr];
265         }
266         for (ObjectSlot slot = start; slot < end; slot++) {
267             auto newAddr = VisitMember(slot, objAddr, notFoundObj, jsHclass, objMap);
268             if (jsHclass->IsJsGlobalEnv() && refSet != nullptr && newAddr != 0LL) {
269                 refSet->insert(newAddr);
270             }
271         }
272     };
273     for (auto objInfo : objMap) {
274         auto newAddr = objInfo.second->Data();
275         auto jsHclassAddr = *reinterpret_cast<uint64_t *>(newAddr);
276         auto jsHclassItem = objMap.find(jsHclassAddr);
277         if (jsHclassItem == objMap.end()) {
278             LOG_ECMA(ERROR) << "ark raw heap decode hclass not found, jsHclassAddr=" << std::hex << jsHclassAddr;
279             continue;
280         }
281         TaggedObject *obj = reinterpret_cast<TaggedObject *>(newAddr);
282         *reinterpret_cast<uint64_t *>(newAddr) = reinterpret_cast<uint64_t>(jsHclassItem->second->Data());
283         auto jsHclass = reinterpret_cast<JSHClass *>(jsHclassItem->second->Data());
284         if (jsHclass->IsString()) {
285             continue;
286         }
287         if (jsHclass->IsJsGlobalEnv()) {
288             refSetMap.emplace(reinterpret_cast<uint64_t>(newAddr), CUnorderedSet<uint64_t>());
289         }
290         ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, jsHclass, visitor);
291     }
292     if (notFoundObj.size() > 0) {
293         LOG_ECMA(ERROR) << "ark raw heap decode visit obj: not found obj num=" << notFoundObj.size();
294     }
295     return refSetMap;
296 }
297 
298 static uint64_t GetFileSize(std::string &inputFilePath)
299 {
300     if (inputFilePath.empty()) {
301         return 0;
302     }
303     struct stat fileInfo;
304     if (stat(inputFilePath.c_str(), &fileInfo) == 0) {
305         return fileInfo.st_size;
306     }
307     return 0;
308 }
309 
310 bool ReadFileAtOffset(std::ifstream &file, uint32_t offset, char *buf, uint32_t size)
311 {
312     if (buf == nullptr) {
313         LOG_ECMA(ERROR) << "ark raw heap decode file buf is nullptr";
314         return false;
315     }
316     if (!file.is_open()) {
317         LOG_ECMA(ERROR) << "ark raw heap decode file not open";
318         return false;
319     }
320     file.clear();
321     if (!file.seekg(offset)) {
322         LOG_ECMA(ERROR) << "ark raw heap decode file set offset failed, offset=" << offset;
323         return false;
324     }
325     if (file.read(buf, size).fail()) {
326         LOG_ECMA(ERROR) << "ark raw heap decode file read failed, offset=" << offset;
327         return false;
328     }
329     return true;
330 }
331 
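// Reads every object section (sections[4] onward, stored as offset/size pairs): each section begins with
// {obj_num, sizeof(AddrTableItem)}, followed by obj_num AddrTableItem headers and then the raw object bytes.
// Returns a map from the object's original address to its newly allocated copy.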
332 CUnorderedMap<uint64_t, NewAddr *> DecodeMemObj(std::ifstream &file, CVector<uint32_t> &sections)
333 {
334     CUnorderedMap<uint64_t, NewAddr *> objMap; // old addr map to new obj
335     uint32_t heapTotalSize = 0;
336     uint32_t objTotalNum = 0;
337     for (uint32_t sec = 4; sec + 1 < sections.size(); sec += 2) { // 2 :step is 2
338         uint32_t offset = sections[sec];
339         uint32_t secHead[2];
340         if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
341             LOG_ECMA(ERROR) << "ark raw heap decode read obj section failed, sec=" << sec << ", offset="
342                             << offset << ", size=" << sections[sec + 1];
343             return objMap;
344         }
345         LOG_ECMA(INFO) << "ark raw heap decode read obj section, sec=" << sec << ", offset=" << offset
346                         << ", size=" << sections[sec + 1] << ", obj num=" << secHead[0];
347         auto tbSize = secHead[0] * sizeof(AddrTableItem);
348         if (secHead[1] != sizeof(AddrTableItem) || tbSize == 0 || tbSize > MAX_OBJ_SIZE) {
349             LOG_ECMA(ERROR) << "ark raw heap decode check obj table section=" << sections[sec] << ", head size="
350                             << sizeof(AddrTableItem) << ", but=" << secHead[1] << " or error table size=" << tbSize;
351             continue;
352         }
353         CVector<char> objTabBuf(tbSize);
354         file.read(objTabBuf.data(), tbSize);
355         auto objTab = reinterpret_cast<AddrTableItem *>(objTabBuf.data());
356         offset += sizeof(secHead);
357         objTotalNum += secHead[0];
358         for (uint32_t i = 0; i < secHead[0]; i++) {
359             heapTotalSize += objTab[i].objSize;
360             auto actSize = i + 1 < secHead[0] ? objTab[i + 1].offset - objTab[i].offset :
361                            sections[sec + 1] - objTab[i].offset - sizeof(secHead);
362             if (actSize != objTab[i].objSize && actSize != sizeof(uint64_t)) {
363                 auto tabOffset = offset + i * sizeof(AddrTableItem);
364                 LOG_ECMA(ERROR) << "ark raw heap decode check obj size i=" << i << std::hex << ", offset=" << tabOffset
365                                 << ", addr=" << objTab[i].addr << ", size=" << objTab[i].objSize << " but=" << actSize;
366                 continue;
367             }
368             objMap.emplace(objTab[i].addr, new NewAddr(actSize, objTab[i].objSize));
369             auto result = ReadFileAtOffset(file, offset + objTab[i].offset, objMap[objTab[i].addr]->Data(), actSize);
370             if (!result) {
371                 LOG_ECMA(ERROR) << "ark raw heap decode read failed, i=" << i << ", base offset=" << offset
372                                 << ", obj addr=" << objTab[i].addr << ", read size=" << actSize;
373                 return objMap;
374             }
375         }
376     }
377     LOG_ECMA(INFO) << "ark raw heap decode read obj, num=" << objTotalNum << ", size=" << heapTotalSize;
378     return objMap;
379 }
380 
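// Decodes the string table section: each record is {u32 strLen, u32 addrNum, addrNum * u64 object addresses,
// NUL-terminated string}; returns a map from object address to the interned string.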
381 CUnorderedMap<uint64_t, CString *> DecodeStrTable(StringHashMap *strTable, std::ifstream &file,
382                                                   uint32_t offset, uint32_t secSize)
383 {
384     uint32_t secHead[2];
385     if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
386         LOG_ECMA(ERROR) << "ark raw heap decode read str table failed, offset=" << offset << ", size=" << secSize;
387         return CUnorderedMap<uint64_t, CString *>(0);
388     }
389     uint32_t byteNum = secSize - sizeof(secHead);
390     char *charPtr = new char[byteNum];
391     file.read(charPtr, byteNum);
392     CUnorderedMap<uint64_t, CString *> strTabMap; // old addr map to str id
393     uint32_t cnt = 0;
394     uint32_t baseOff = 0;
395     while (cnt++ < secHead[0]) {
396         uint32_t *u32Ptr = reinterpret_cast<uint32_t *>(charPtr + baseOff);
397         auto strOffset = (u32Ptr[1] + 1) * sizeof(uint64_t) + baseOff;
398         auto getSize = strlen(charPtr + strOffset);
399         if (u32Ptr[0] != getSize) {
400             LOG_ECMA(ERROR) << cnt << " ark raw heap decode check str size=" << u32Ptr[0] << ", but=" << getSize << "\n";
401         }
402         auto strAddr = strTable->GetString(charPtr + strOffset);
403         uint32_t num = 0;
404         uint64_t *u64Ptr = reinterpret_cast<uint64_t *>(&u32Ptr[2]);
405         while (num < u32Ptr[1]) {
406             strTabMap[u64Ptr[num]] = strAddr;
407             num++;
408         }
409         baseOff = strOffset + u32Ptr[0] + 1;
410     }
411     delete[] charPtr;
412     LOG_ECMA(INFO) << "ark raw heap decode string table size=" << strTable->GetCapcity();
413     return strTabMap;
414 }
415 
416 CUnorderedSet<uint64_t> DecodeRootTable(std::ifstream &file, uint32_t offset, uint32_t secSize)
417 {
418     uint32_t secHead[2];
419     if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
420         LOG_ECMA(ERROR) << "ark raw heap decode read root table failed, offset=" << offset << ", size=" << secSize;
421         return CUnorderedSet<uint64_t>(0);
422     }
423     if (secHead[1] != sizeof(uint64_t)) {
424         LOG_ECMA(ERROR) << "ark raw heap decode error root size, need=" << sizeof(uint64_t) << ", but=" << secHead[1];
425         return CUnorderedSet<uint64_t>(0);
426     }
427     auto checkSize = sizeof(uint64_t) * secHead[0] + sizeof(secHead);
428     if (secSize != checkSize) {
429         LOG_ECMA(ERROR) << "ark raw heap decode check root section size=" << secSize << ", but=" << checkSize;
430         return CUnorderedSet<uint64_t>(0);
431     }
432     CVector<uint64_t> rootVec(secHead[0]);
433     file.read(reinterpret_cast<char *>(rootVec.data()), sizeof(uint64_t) * secHead[0]);
434     CUnorderedSet<uint64_t> rootSet;
435     for (auto addr : rootVec) {
436         rootSet.insert(addr);
437     }
438     LOG_ECMA(INFO) << "ark raw heap decode root obj num=" << rootSet.size();
439     return rootSet;
440 }
441 
442 CVector<uint32_t> GetSectionInfo(std::ifstream &file, uint64_t fileSize)
443 {
444     uint32_t secHead[2];
445     uint32_t fileOffset = fileSize - sizeof(uint32_t) * 2; // 2 : last 2 uint32
446     file.seekg(fileOffset);
447     file.read(reinterpret_cast<char *>(secHead), sizeof(secHead));
448     if (secHead[1] != sizeof(uint32_t)) {
449         LOG_ECMA(ERROR) << "ark raw heap decode unexpected head, need=" << sizeof(uint32_t) << ", but=" << secHead[1];
450         return CVector<uint32_t>(0);
451     }
452     CVector<uint32_t> secInfo(secHead[0]); // last 4 byte is section num
453     auto secInfoSize = secHead[0] * secHead[1];
454     fileOffset -= secInfoSize;
455     file.seekg(fileOffset);
456     file.read(reinterpret_cast<char *>(secInfo.data()), secInfoSize);
457     return secInfo;
458 }
459 
460 void ClearObjMem(CUnorderedMap<uint64_t, NewAddr *> &objMap)
461 {
462     for (auto objItem : objMap) {
463         delete objItem.second;
464     }
465     objMap.clear();
466 }
467 
468 static bool GetValidFileSize(std::string &inputFilePath, uint64_t &fileSize)
469 {
470     fileSize = GetFileSize(inputFilePath);
471     if (fileSize == 0) {
472         LOG_ECMA(ERROR) << "ark raw heap decode get file size=0";
473         return false;
474     }
475     if (fileSize > MAX_FILE_SIZE) {
476         LOG_ECMA(ERROR) << "ark raw heap decode get file size > 4GB, unsupported";
477         return false;
478     }
479     return true;
480 }
481 
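// Decodes a raw heap file back into a HeapSnapshot and serializes it as JSON: reads the section index,
// restores the objects and their references, then the root set and string table, and finally builds the
// snapshot and writes it to outputPath.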
482 bool HeapProfiler::GenerateHeapSnapshot(std::string &inputFilePath, std::string &outputPath)
483 {
484     LOG_ECMA(INFO) << "ark raw heap decode start target=" << outputPath;
485     std::string realPath;
486     if (!RealPath(inputFilePath, realPath)) {
487         LOG_ECMA(ERROR) << "get real path failed:" << inputFilePath;
488         return false;
489     }
490     uint64_t fileSize;
491     if (!GetValidFileSize(realPath, fileSize)) {
492         return false;
493     }
494     std::ifstream file(realPath, std::ios::binary);
495     if (!file.is_open()) {
496         LOG_ECMA(ERROR) << "ark raw heap decode file failed:" << realPath;
497         return false;
498     }
499     CVector<uint32_t> sections = GetSectionInfo(file, fileSize);
500     if (sections.size() == 0) {
501         LOG_ECMA(ERROR) << "ark raw heap decode not found section data";
502         return false;
503     }
504     auto objMap = DecodeMemObj(file, sections);
505     auto refSetMap = VisitObj(objMap);
506     auto rootSet = DecodeRootTable(file, sections[0], sections[1]);
507     auto strTabMap = DecodeStrTable(GetEcmaStringTable(), file, sections[2], sections[3]);
508     file.close();
509     DumpSnapShotOption dp;
510     auto *snapshot = new HeapSnapshot(vm_, GetEcmaStringTable(), dp, false, entryIdMap_);
511     LOG_ECMA(INFO) << "ark raw heap decode generate nodes=" << objMap.size();
512     snapshot->GenerateNodeForBinMod(objMap, rootSet, strTabMap);
513     rootSet.clear();
514     strTabMap.clear();
515     LOG_ECMA(INFO) << "ark raw heap decode fill edges=" << objMap.size();
516     snapshot->BuildSnapshotForBinMod(objMap, refSetMap);
517     refSetMap.clear();
518     ClearObjMem(objMap);
519     if (outputPath.empty()) {
520         outputPath = GenDumpFileName(dp.dumpFormat);
521     } else if (outputPath.back() == '/') {
522         outputPath += GenDumpFileName(dp.dumpFormat);
523     }
524     LOG_ECMA(INFO) << "ark raw heap decode serialize file=" << outputPath.c_str();
525     FileStream newStream(outputPath);
526     auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
527     delete snapshot;
528     LOG_ECMA(INFO) << "ark raw heap decode finish";
529     return serializerResult;
530 }
531 
532 [[maybe_unused]] static void WaitProcess(pid_t pid)
533 {
534     time_t startTime = time(nullptr);
535     constexpr int DUMP_TIME_OUT = 300;
536     constexpr int DEFAULT_SLEEP_TIME = 100000;
537     while (true) {
538         int status = 0;
539         pid_t p = waitpid(pid, &status, WNOHANG);
540         if (p < 0 || p == pid) {
541             break;
542         }
543         if (time(nullptr) > startTime + DUMP_TIME_OUT) {
544             LOG_GC(ERROR) << "DumpHeapSnapshot kill dump process after waiting " << DUMP_TIME_OUT << " s";
545             kill(pid, SIGTERM);
546             break;
547         }
548         usleep(DEFAULT_SLEEP_TIME);
549     }
550 }
551 
552 template<typename Callback>
553 void IterateSharedHeap(Callback &cb)
554 {
555     auto heap = SharedHeap::GetInstance();
556     heap->GetOldSpace()->IterateOverObjects(cb);
557     heap->GetCompressSpace()->IterateOverObjects(cb);
558     heap->GetNonMovableSpace()->IterateOverObjects(cb);
559     heap->GetHugeObjectSpace()->IterateOverObjects(cb);
560     heap->GetAppSpawnSpace()->IterateOverObjects(cb);
561     heap->GetReadOnlySpace()->IterateOverObjects(cb);
562 }
563 
564 std::pair<uint64_t, uint64_t> GetHeapCntAndSize(const EcmaVM *vm)
565 {
566     uint64_t cnt = 0;
567     uint64_t objectSize = 0;
568     auto cb = [&objectSize, &cnt]([[maybe_unused]] TaggedObject *obj) {
569         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
570         ++cnt;
571     };
572     vm->GetHeap()->IterateOverObjects(cb, false);
573     return std::make_pair(cnt, objectSize);
574 }
575 
576 std::pair<uint64_t, uint64_t> GetSharedCntAndSize()
577 {
578     uint64_t cnt = 0;
579     uint64_t size = 0;
580     auto cb = [&cnt, &size](TaggedObject *obj) {
581         cnt++;
582         size += obj->GetClass()->SizeFromJSHClass(obj);
583     };
584     IterateSharedHeap(cb);
585     return std::make_pair(cnt, size);
586 }
587 
588 static CUnorderedSet<TaggedObject*> GetRootObjects(const EcmaVM *vm)
589 {
590     CUnorderedSet<TaggedObject*> result {};
591     HeapRootVisitor visitor;
592     uint32_t rootCnt1 = 0;
593     RootVisitor rootEdgeBuilder = [&result, &rootCnt1](
594         [[maybe_unused]] Root type, ObjectSlot slot) {
595         JSTaggedValue value((slot).GetTaggedType());
596         if (!value.IsHeapObject()) {
597             return;
598         }
599         ++rootCnt1;
600         TaggedObject *root = value.GetTaggedObject();
601         result.insert(root);
602     };
603     RootBaseAndDerivedVisitor rootBaseEdgeBuilder = []
604         ([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, [[maybe_unused]] ObjectSlot derived,
605          [[maybe_unused]] uintptr_t baseOldObject) {
606     };
607     uint32_t rootCnt2 = 0;
608     RootRangeVisitor rootRangeEdgeBuilder = [&result, &rootCnt2]([[maybe_unused]] Root type,
609         ObjectSlot start, ObjectSlot end) {
610         for (ObjectSlot slot = start; slot < end; slot++) {
611             JSTaggedValue value((slot).GetTaggedType());
612             if (!value.IsHeapObject()) {
613                 continue;
614             }
615             ++rootCnt2;
616             TaggedObject *root = value.GetTaggedObject();
617             result.insert(root);
618         }
619     };
620     visitor.VisitHeapRoots(vm->GetJSThread(), rootEdgeBuilder, rootRangeEdgeBuilder, rootBaseEdgeBuilder);
621     SharedModuleManager::GetInstance()->Iterate(rootEdgeBuilder);
622     Runtime::GetInstance()->IterateCachedStringRoot(rootRangeEdgeBuilder);
623     return result;
624 }
625 
626 size_t GetNotFoundObj(const EcmaVM *vm)
627 {
628     size_t heapTotalSize = 0;
629     CUnorderedSet<TaggedObject*> allHeapObjSet {};
630     auto handleObj = [&allHeapObjSet, &heapTotalSize](TaggedObject *obj) {
631         allHeapObjSet.insert(obj);
632         uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
633         heapTotalSize += objSize;
634     };
635     vm->GetHeap()->IterateOverObjects(handleObj, false);
636     vm->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
637     IterateSharedHeap(handleObj);
638     LOG_ECMA(INFO) << "ark raw heap dump GetNotFound heap count:" << allHeapObjSet.size()
639                    << ", heap size=" << heapTotalSize;
640     CUnorderedSet<TaggedObject *> notFoundObjSet {};
641     auto visitor = [&notFoundObjSet, &allHeapObjSet] ([[maybe_unused]]TaggedObject *root, ObjectSlot start,
642                                                       ObjectSlot end, VisitObjectArea area) {
643         if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
644             return;
645         }
646         for (ObjectSlot slot = start; slot < end; slot++) {
647             auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
648             JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
649             auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
650             if (!value.IsHeapObject() || originalAddr == 0) {
651                 continue;
652             }
653             if (value.IsWeakForHeapObject()) {
654                 originalAddr -= 1;
655             }
656             if (allHeapObjSet.find(reinterpret_cast<TaggedObject *>(originalAddr)) != allHeapObjSet.end()) {
657                 continue;
658             }
659             auto obj = reinterpret_cast<TaggedObject *>(*taggedPointerAddr);
660             if (notFoundObjSet.find(obj) != notFoundObjSet.end()) {
661                 continue;
662             }
663             notFoundObjSet.insert(obj);
664         }
665     };
666     for (auto obj : allHeapObjSet) {
667         ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, obj->GetClass(), visitor);
668     }
669     LOG_ECMA(INFO) << "ark raw heap dump GetNotFound not found count:" << notFoundObjSet.size();
670     return notFoundObjSet.size();
671 }
672 
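// Copies the bodies of the objects listed in objTable into chunked buffers (PER_GROUP_MEM_SIZE each),
// filling in each AddrTableItem's size and file offset; for strings only the hclass word is copied and the
// character data is left zeroed. Returns the total object size copied.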
673 uint32_t HeapProfiler::CopyObjectMem2Buf(char *objTable, uint32_t objNum,
674                                          CVector<std::pair<char *, uint32_t>> &memBufMap)
675 {
676     char *currMemBuf = nullptr;
677     uint32_t currSize = 0;
678     uint32_t totalSize = 0;
679     uint32_t curOffset = objNum * sizeof(AddrTableItem);
680     auto objHeaders = reinterpret_cast<AddrTableItem *>(objTable);
681     for (uint32_t j = 0; j < objNum; ++j) {
682         auto obj = reinterpret_cast<TaggedObject *>(objHeaders[j].addr);
683         JSTaggedValue value(obj);
684         uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
685         totalSize += objSize;
686         if (currSize + objSize > PER_GROUP_MEM_SIZE || currMemBuf == nullptr) {
687             if (currMemBuf != nullptr) {
688                 memBufMap.push_back({currMemBuf, currSize});
689             }
690             currSize = 0;
691             currMemBuf = chunk_.NewArray<char>(objSize > PER_GROUP_MEM_SIZE ? objSize : PER_GROUP_MEM_SIZE);
692         }
693         objHeaders[j].objSize = objSize;
694         objHeaders[j].offset = curOffset;
695         int32_t ret;
696         if (value.IsString()) {
697             CVector<uint64_t> strTmp(objSize / sizeof(uint64_t), 0);
698             strTmp[0] = *reinterpret_cast<uint64_t *>(objHeaders[j].addr);
699             ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(strTmp.data()), objSize);
700         } else {
701             ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(objHeaders[j].addr), objSize);
702         }
703         if (ret != 0) {
704             LOG_ECMA(ERROR) << "ark raw heap dump CopyObjectMem memcpy_s failed, currSize="
705                             << currSize << ",objSize=" << objSize << ",addr=" << objHeaders[j].addr;
706             return totalSize;
707         }
708         curOffset += objSize;
709         currSize += objSize;
710     }
711     if (currSize > 0) {
712         memBufMap.push_back({currMemBuf, currSize});
713     } else if (currMemBuf != nullptr) {
714         chunk_.Delete<char>(currMemBuf);
715     }
716     return totalSize;
717 }
718 
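// Walks the local, compress, and shared heaps, assigns a stable node id to every object, and groups the
// AddrTableItem headers into buffers of HEAD_NUM_PER_GROUP entries (headerMap); also records, per string id,
// the addresses that use it (strIdMap). Returns the total object count.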
719 uint32_t HeapProfiler::GenObjTable(CUnorderedMap<char *, uint32_t> &headerMap, HeapSnapshot *snapshot,
720                                    CUnorderedMap<uint64_t, CVector<uint64_t>> &strIdMap)
721 {
722     char *currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
723     uint32_t index = 0;
724     uint32_t objNum = 0;
725     auto table = reinterpret_cast<AddrTableItem *>(currBuf);
726     auto handleObj = [&index, &table, &objNum, &headerMap, &currBuf, &snapshot, &strIdMap, this](TaggedObject *obj) {
727         JSTaggedValue value(obj);
728         auto taggedType = value.GetRawData();
729         auto [exist, id] = entryIdMap_->FindId(taggedType);
730         if (!exist) {
731             entryIdMap_->InsertId(taggedType, id);
732         }
733         table[index].addr = reinterpret_cast<uint64_t>(obj);
734         table[index].id = id;
735         auto strId = snapshot->GenerateStringId(obj);
736         if (strId != 1) { // 1 : invalid str id
737             if (strIdMap.find(strId) == strIdMap.end()) {
738                 strIdMap.emplace(strId, CVector<uint64_t>());
739             }
740             strIdMap[strId].push_back(table[index].addr);
741         }
742         index++;
743         if (index == HEAD_NUM_PER_GROUP) {
744             headerMap.emplace(currBuf, index);
745             objNum += HEAD_NUM_PER_GROUP;
746             index = 0;
747             currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
748             table = reinterpret_cast<AddrTableItem *>(currBuf);
749         }
750     };
751     vm_->GetHeap()->IterateOverObjects(handleObj, false);
752     vm_->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
753     IterateSharedHeap(handleObj);
754     objNum += index;
755     if (index != 0) {
756         headerMap.emplace(currBuf, index);
757     } else {
758         chunk_.Delete<char>(currBuf);
759     }
760     return objNum;
761 }
762 
763 // 4 byte: root_num
764 // 4 byte: unit size = sizeof(addr), 8 byte here
765 // {8 byte: root obj addr} * root_num
766 uint32_t HeapProfiler::GenRootTable(Stream *stream)
767 {
768     auto roots = GetRootObjects(vm_);
769     uint32_t rootSecHeadSize = 8; // 8 : root num and unit size
770     auto rootSecSize = roots.size() * (sizeof(TaggedObject *)) + rootSecHeadSize;
771     auto memBuf = chunk_.NewArray<char>(rootSecSize);
772     uint32_t *rootHeader = reinterpret_cast<uint32_t *>(memBuf);
773     uint64_t *rootBuf = reinterpret_cast<uint64_t *>(memBuf + rootSecHeadSize); // 8 : root addr start offset
774     rootHeader[0] = roots.size(); // 0: root num
775     rootHeader[1] = sizeof(TaggedObject *); // 1: unit size
776     auto currInd = 0;
777     for (auto root : roots) {
778         rootBuf[currInd++] = reinterpret_cast<uint64_t>(root);
779     }
780     LOG_ECMA(INFO) << "ark raw heap dump GenRootTable root cnt=" << roots.size();
781     stream->WriteBinBlock(memBuf, rootSecSize);
782     chunk_.Delete<char>(memBuf);
783     return rootSecSize;
784 }
785 
786 
787 // 4 byte: obj_num
788 // 4 byte: unit size = sizeof(AddrTableItem)
789 // {AddrTableItem} * obj_num
790 // {obj contents} * obj_num
791 uint32_t HeapProfiler::WriteToBinFile(Stream *stream, char *objTab, uint32_t objNum,
792                                       CVector<std::pair<char *, uint32_t>> &memBuf)
793 {
794     uint32_t secHeader[] = {objNum, sizeof(AddrTableItem)};
795     uint32_t secTotalSize = sizeof(secHeader);
796     stream->WriteBinBlock(reinterpret_cast<char *>(secHeader), secTotalSize);
797     uint32_t headerSize = objNum * sizeof(AddrTableItem);
798     secTotalSize += headerSize;
799     stream->WriteBinBlock(objTab, headerSize); // write obj header
800     chunk_.Delete<char>(objTab);
801     for (auto memItem : memBuf) {
802         stream->WriteBinBlock(memItem.first, memItem.second);
803         secTotalSize += memItem.second;
804         chunk_.Delete<char>(memItem.first);
805     }
806     return secTotalSize;
807 }
808 
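// Writes the string table section and then one object section per header group, copying object memory on
// worker threads; appends each section's offset and size to secIndexVec and advances fileOffset.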
809 bool HeapProfiler::DumpRawHeap(Stream *stream, uint32_t &fileOffset, CVector<uint32_t> &secIndexVec)
810 {
811     CUnorderedMap<char *, uint32_t> objTabMap; // buf map table num
812     CUnorderedMap<uint64_t, CVector<uint64_t>> strIdMapObjVec; // string id map to objs vector
813     DumpSnapShotOption op;
814     auto snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), op, false, entryIdMap_);
815     uint32_t objTotalNum = GenObjTable(objTabMap, snapshot, strIdMapObjVec);
816     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap totalObjNumber=" << objTotalNum;
817     CVector<CVector<std::pair<char *, uint32_t>>> allMemBuf(objTabMap.size(), CVector<std::pair<char *, uint32_t>>());
818     CVector<std::thread> threadsVec;
819     CVector<char *> objTabVec(objTabMap.size());
820     uint32_t index = 0;
821     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap start to copy, thread num=" << objTabMap.size();
822     for (auto tableItem : objTabMap) {
823         auto tdCb = [this, &tableItem, &allMemBuf, &index] () {
824             CopyObjectMem2Buf(tableItem.first, tableItem.second, allMemBuf[index]);
825         };
826         threadsVec.emplace_back(tdCb);
827         objTabVec[index] = tableItem.first;
828         threadsVec[index].join();
829         ++index;
830     }
831     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write string, num=" << strIdMapObjVec.size();
832     secIndexVec.push_back(fileOffset); // string table section offset
833     auto size = HeapSnapshotJSONSerializer::DumpStringTable(GetEcmaStringTable(), stream, strIdMapObjVec);
834     secIndexVec.push_back(size); // string table section size
835     GetChunk()->Delete(snapshot);
836     fileOffset += size;
837     strIdMapObjVec.clear();
838     uint32_t finCnt = 0;
839     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write obj, offset=" << fileOffset;
840     while (finCnt < threadsVec.size()) {
841         for (index = 0; index < threadsVec.size(); ++index) {
842             if (threadsVec[index].joinable()) { // thread not finished
843                 continue;
844             }
845             ++finCnt;
846             secIndexVec.push_back(fileOffset); // current section offset
847             auto objNum = objTabMap[objTabVec[index]];
848             auto currSecSize = WriteToBinFile(stream, objTabVec[index], objNum, allMemBuf[index]);
849             LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write offset=" << fileOffset << ", size=" << currSecSize;
850             secIndexVec.push_back(currSecSize); // current section size
851             fileOffset += currSecSize;
852         }
853     }
854     return true;
855 }
856 
857 //  * 8 byte: version id
858 //  * root table section
859 //  * string table section
860 //  * {heap section / share heap section} * thread_num
861 //  * 4 byte: root table section offset
862 //  * 4 byte: root table section size
863 //  * 4 byte: string table section offset
864 //  * 4 byte: string table section size
865 //  * {
866 //  * 4 byte: obj section offset
867 //  * 4 byte: obj section size
868 //  * } * thread_num
869 //  * 4 byte: section_num
870 //  * 4 byte: section index unit size, 4 byte here
871 bool HeapProfiler::BinaryDump(Stream *stream, [[maybe_unused]] const DumpSnapShotOption &dumpOption)
872 {
873     char versionID[VERSION_ID_SIZE] = { 0 };
874     LOG_ECMA(INFO) << "ark raw heap dump start, version is: " << versionID;
875     stream->WriteBinBlock(versionID, VERSION_ID_SIZE);
876     CQueue<CVector<TaggedObject *>> needStrObjQue;
877     // a vector to index all sections, [offset, section_size, offset, section_size, ...]
878     CVector<uint32_t> secIndexVec(2); // 2 : section head size
879     uint32_t fileOffset = VERSION_ID_SIZE;
880     secIndexVec[0] = fileOffset;
881     LOG_ECMA(INFO) << "ark raw heap dump GenRootTable";
882     auto rootSectionSize = GenRootTable(stream);
883     secIndexVec[1] = rootSectionSize; // root section size
884     fileOffset += rootSectionSize; // advance past the root section
885     DumpRawHeap(stream, fileOffset, secIndexVec);
886     secIndexVec.push_back(secIndexVec.size()); // penultimate 4 bytes: number of section index entries
887     secIndexVec.push_back(sizeof(uint32_t)); // last 4 bytes: section index unit size
888     stream->WriteBinBlock(reinterpret_cast<char *>(secIndexVec.data()), secIndexVec.size() * sizeof(uint32_t));
889 #ifdef OHOS_UNIT_TEST
890     LOG_ECMA(INFO) << "ark raw heap dump UT check obj self-contained";
891     size_t ret = GetNotFoundObj(vm_);
892     return ret == 0;
893 #else
894     LOG_ECMA(INFO) << "ark raw heap dump finished num=" << secIndexVec.size();
895     return true;
896 #endif
897 }
898 
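// Rebuilds entryIdMap_ so that every currently live object (shared and local heaps) has an id before the
// dump process is forked; stale entries are dropped by replacing the old map with the new one.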
899 void HeapProfiler::FillIdMap()
900 {
901     EntryIdMap* newEntryIdMap = GetChunk()->New<EntryIdMap>();
902     // Iterate SharedHeap Object
903     SharedHeap* sHeap = SharedHeap::GetInstance();
904     if (sHeap != nullptr) {
905         sHeap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
906             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
907             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
908             newEntryIdMap->InsertId(addr, sequenceId);
909         });
910     }
911 
912     // Iterate LocalHeap Object
913     auto heap = vm_->GetHeap();
914     if (heap != nullptr) {
915         heap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
916             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
917             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
918             newEntryIdMap->InsertId(addr, sequenceId);
919         });
920     }
921 
922     // copy entryIdMap
923     CUnorderedMap<JSTaggedType, NodeId>* idMap = entryIdMap_->GetIdMap();
924     CUnorderedMap<JSTaggedType, NodeId>* newIdMap = newEntryIdMap->GetIdMap();
925     *idMap = *newIdMap;
926 
927     GetChunk()->Delete(newEntryIdMap);
928 }
929 
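// Entry point for snapshot dumps: optionally runs a full GC, suspends all threads, then either does a
// binary dump (OOM), a synchronous JSON dump (IDE), or forks a child process that performs the dump while
// the parent waits for it asynchronously.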
930 bool HeapProfiler::DumpHeapSnapshot(Stream *stream, const DumpSnapShotOption &dumpOption, Progress *progress)
931 {
932     bool res = false;
933     base::BlockHookScope blockScope;
934     ThreadManagedScope managedScope(vm_->GetJSThread());
935     pid_t pid = -1;
936     {
937         if (dumpOption.isFullGC) {
938             [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
939             ASSERT(heapClean);
940         }
941         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread()); // suspend All.
942         if (dumpOption.isFullGC) {
943             DISALLOW_GARBAGE_COLLECTION;
944             const_cast<Heap *>(vm_->GetHeap())->Prepare();
945             SharedHeap::GetInstance()->Prepare(true);
946         }
947         Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
948             ASSERT(!thread->IsInRunningState());
949             const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
950         });
951         // OOM and ThresholdReachedDump.
952         if (dumpOption.isDumpOOM) {
953             res = BinaryDump(stream, dumpOption);
954             stream->EndOfStream();
955             return res;
956         }
957         // ide.
958         if (dumpOption.isSync) {
959             return DoDump(stream, progress, dumpOption);
960         }
961         // hidumper do fork and fillmap.
962         if (dumpOption.isBeforeFill) {
963             FillIdMap();
964         }
965         // fork
966         if ((pid = ForkBySyscall()) < 0) {
967             LOG_ECMA(ERROR) << "DumpHeapSnapshot fork failed!";
968             return false;
969         }
970         if (pid == 0) {
971             vm_->GetAssociatedJSThread()->EnableCrossThreadExecution();
972             prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("dump_process"), 0, 0, 0);
973             res = DoDump(stream, progress, dumpOption);
974             _exit(0);
975         }
976     }
977     if (pid != 0) {
978         std::thread thread(&WaitProcess, pid);
979         thread.detach();
980         stream->EndOfStream();
981     }
982     isProfiling_ = true;
983     return res;
984 }
985 
986 bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
987                                      bool traceAllocation, bool newThread)
988 {
989     vm_->CollectGarbage(TriggerGCType::OLD_GC);
990     ForceSharedGC();
991     SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
992     DumpSnapShotOption dumpOption;
993     dumpOption.isVmMode = isVmMode;
994     dumpOption.isPrivate = false;
995     dumpOption.captureNumericValue = false;
996     HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, dumpOption, traceAllocation);
997     if (snapshot == nullptr) {
998         return false;
999     }
1000     isProfiling_ = true;
1001     UpdateHeapObjects(snapshot);
1002     heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
1003     const_cast<EcmaVM *>(vm_)->StartHeapTracking();
1004     if (newThread) {
1005         heapTracker_->StartTracing();
1006     }
1007 
1008     return true;
1009 }
1010 
1011 bool HeapProfiler::UpdateHeapTracking(Stream *stream)
1012 {
1013     if (heapTracker_ == nullptr) {
1014         return false;
1015     }
1016     HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
1017     if (snapshot == nullptr) {
1018         return false;
1019     }
1020 
1021     {
1022         vm_->CollectGarbage(TriggerGCType::OLD_GC);
1023         ForceSharedGC();
1024         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
1025         snapshot->RecordSampleTime();
1026         UpdateHeapObjects(snapshot);
1027     }
1028 
1029     if (stream != nullptr) {
1030         snapshot->PushHeapStat(stream);
1031     }
1032 
1033     return true;
1034 }
1035 
1036 bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
1037 {
1038     if (heapTracker_ == nullptr) {
1039         return false;
1040     }
1041     int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
1042 
1043     const_cast<EcmaVM *>(vm_)->StopHeapTracking();
1044     if (newThread) {
1045         heapTracker_->StopTracing();
1046     }
1047 
1048     HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
1049     if (snapshot == nullptr) {
1050         return false;
1051     }
1052 
1053     if (progress != nullptr) {
1054         progress->ReportProgress(0, heapCount);
1055     }
1056     {
1057         ForceSharedGC();
1058         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
1059         SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
1060         snapshot->FinishSnapshot();
1061     }
1062 
1063     isProfiling_ = false;
1064     if (progress != nullptr) {
1065         progress->ReportProgress(heapCount, heapCount);
1066     }
1067     return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
1068 }
1069 
1070 std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
1071 {
1072     CString filename("hprof_");
1073     switch (dumpFormat) {
1074         case DumpFormat::JSON:
1075             filename.append(GetTimeStamp());
1076             break;
1077         case DumpFormat::BINARY:
1078             filename.append("unimplemented");
1079             break;
1080         case DumpFormat::OTHER:
1081             filename.append("unimplemented");
1082             break;
1083         default:
1084             filename.append("unimplemented");
1085             break;
1086     }
1087     filename.append(".heapsnapshot");
1088     return ConvertToStdString(filename);
1089 }
1090 
1091 CString HeapProfiler::GetTimeStamp()
1092 {
1093     std::time_t timeSource = std::time(nullptr);
1094     struct tm tm {
1095     };
1096     struct tm *timeData = localtime_r(&timeSource, &tm);
1097     if (timeData == nullptr) {
1098         LOG_FULL(FATAL) << "localtime_r failed";
1099         UNREACHABLE();
1100     }
1101     CString stamp;
1102     const int TIME_START = 1900;
1103     stamp.append(ToCString(timeData->tm_year + TIME_START))
1104         .append("-")
1105         .append(ToCString(timeData->tm_mon + 1))
1106         .append("-")
1107         .append(ToCString(timeData->tm_mday))
1108         .append("_")
1109         .append(ToCString(timeData->tm_hour))
1110         .append("-")
1111         .append(ToCString(timeData->tm_min))
1112         .append("-")
1113         .append(ToCString(timeData->tm_sec));
1114     return stamp;
1115 }
1116 
1117 bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
1118 {
1119     if (vm->IsInitialized()) {
1120         const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
1121         return true;
1122     }
1123     return false;
1124 }
1125 
1126 void HeapProfiler::ForceSharedGC()
1127 {
1128     SharedHeap *sHeap = SharedHeap::GetInstance();
1129     sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(vm_->GetAssociatedJSThread());
1130     sHeap->GetSweeper()->WaitAllTaskFinished();
1131 }
1132 
1133 HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, const DumpSnapShotOption &dumpOption,
1134                                              bool traceAllocation)
1135 {
1136     LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
1137     if (dumpOption.isFullGC) {
1138         DISALLOW_GARBAGE_COLLECTION;
1139         const_cast<Heap *>(vm_->GetHeap())->Prepare();
1140     }
1141     switch (sampleType) {
1142         case SampleType::ONE_SHOT: {
1143             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
1144                                                            traceAllocation, entryIdMap_);
1145             if (snapshot == nullptr) {
1146                 LOG_FULL(FATAL) << "alloc snapshot failed";
1147                 UNREACHABLE();
1148             }
1149             snapshot->BuildUp(dumpOption.isSimplify);
1150             return snapshot;
1151         }
1152         case SampleType::REAL_TIME: {
1153             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
1154                                                            traceAllocation, entryIdMap_);
1155             if (snapshot == nullptr) {
1156                 LOG_FULL(FATAL) << "alloc snapshot failed";
1157                 UNREACHABLE();
1158             }
1159             AddSnapshot(snapshot);
1160             snapshot->PrepareSnapshot();
1161             return snapshot;
1162         }
1163         default:
1164             return nullptr;
1165     }
1166 }
1167 
1168 void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
1169 {
1170     if (hprofs_.size() >= MAX_NUM_HPROF) {
1171         ClearSnapshot();
1172     }
1173     ASSERT(snapshot != nullptr);
1174     hprofs_.emplace_back(snapshot);
1175 }
1176 
1177 void HeapProfiler::ClearSnapshot()
1178 {
1179     for (auto *snapshot : hprofs_) {
1180         GetChunk()->Delete(snapshot);
1181     }
1182     hprofs_.clear();
1183 }
1184 
1185 bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
1186 {
1187     if (heapSampling_.get()) {
1188         LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
1189         return false;
1190     }
1191     heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
1192                                                    samplingInterval, stackDepth);
1193     return true;
1194 }
1195 
1196 void HeapProfiler::StopHeapSampling()
1197 {
1198     heapSampling_.reset();
1199 }
1200 
1201 const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
1202 {
1203     if (!heapSampling_.get()) {
1204         LOG_ECMA(ERROR) << "Heap sampling was not started, please start it first.";
1205         return nullptr;
1206     }
1207     return heapSampling_->GetAllocationProfile();
1208 }
1209 }  // namespace panda::ecmascript
1210