/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <atomic>
#include <chrono>
#include <fcntl.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <thread>
#include <unistd.h>
#include "ecmascript/dfx/hprof/heap_profiler.h"

#include "ecmascript/checkpoint/thread_state_transition.h"
#include "ecmascript/dfx/hprof/heap_snapshot.h"
#include "ecmascript/jspandafile/js_pandafile_manager.h"
#include "ecmascript/mem/heap-inl.h"
#include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/dfx/hprof/heap_root_visitor.h"
#include "ecmascript/mem/object_xray.h"

#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "faultloggerd_client.h"
#endif

namespace panda::ecmascript {
static pid_t ForkBySyscall(void)
{
#ifdef SYS_fork
    return syscall(SYS_fork);
#else
    return syscall(SYS_clone, SIGCHLD, 0);
#endif
}

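// EntryIdMap keeps a stable NodeId per object address across GCs and dumps.
// FindId() returns the existing id for an address, or a fresh id obtained from
// GetNextId() when the address is unknown; callers that want the id to persist
// must follow up with InsertId(), which returns true only for newly inserted
// addresses.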
std::pair<bool, NodeId> EntryIdMap::FindId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return std::make_pair(false, GetNextId()); // return nextId if entry does not exist
    } else {
        return std::make_pair(true, it->second);
    }
}

bool EntryIdMap::InsertId(JSTaggedType addr, NodeId id)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        idMap_.emplace(addr, id);
        return true;
    }
    idMap_[addr] = id;
    return false;
}

bool EntryIdMap::EraseId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return false;
    }
    idMap_.erase(it);
    return true;
}

bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
{
    if (oldAddr == forwardAddr) {
        return true;
    }
    auto it = idMap_.find(oldAddr);
    if (it != idMap_.end()) {
        NodeId id = it->second;
        idMap_.erase(it);
        idMap_[forwardAddr] = id;
        return true;
    }
    return false;
}

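// Rebuilds idMap_ from the nodes recorded in the given snapshot, dropping
// entries whose addresses no longer correspond to a live snapshot node.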
void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
{
    LOG_ECMA(INFO) << "EntryIdMap::UpdateEntryIdMap";
    if (snapshot == nullptr) {
        LOG_ECMA(FATAL) << "EntryIdMap::UpdateEntryIdMap:snapshot is nullptr";
        UNREACHABLE();
    }
    auto nodes = snapshot->GetNodes();
    CUnorderedMap<JSTaggedType, NodeId> newIdMap;
    for (auto node : *nodes) {
        auto addr = node->GetAddress();
        auto it = idMap_.find(addr);
        if (it != idMap_.end()) {
            newIdMap.emplace(addr, it->second);
        }
    }
    idMap_.clear();
    idMap_ = newIdMap;
}

HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
{
    isProfiling_ = false;
    entryIdMap_ = GetChunk()->New<EntryIdMap>();
}

HeapProfiler::~HeapProfiler()
{
    JSPandaFileManager::GetInstance()->ClearNameMap();
    ClearSnapshot();
    GetChunk()->Delete(entryIdMap_);
}

void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
{
    DISALLOW_GARBAGE_COLLECTION;
    if (isProfiling_) {
        // Id will be allocated later when the new node is added
        if (heapTracker_ != nullptr) {
            heapTracker_->AllocationEvent(address, size);
        }
    }
}

void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
{
    LockHolder lock(mutex_);
    if (isProfiling_) {
        entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
        if (heapTracker_ != nullptr) {
            heapTracker_->MoveEvent(address, forwardAddress, size);
        }
    }
}

void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
{
    SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
    snapshot->UpdateNodes();
}

void HeapProfiler::DumpHeapSnapshot([[maybe_unused]] const DumpSnapShotOption &dumpOption)
{
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    // Write in faultlog for heap leak.
    int32_t fd;
    if (dumpOption.isDumpOOM && dumpOption.dumpFormat == DumpFormat::BINARY) {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_RAW_SNAPSHOT));
    } else {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    }
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd=" << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    DumpHeapSnapshot(&stream, dumpOption);
#endif
}

bool HeapProfiler::DoDump(Stream *stream, Progress *progress, const DumpSnapShotOption &dumpOption)
{
    int32_t heapCount = 0;
    HeapSnapshot *snapshot = nullptr;
    {
        if (dumpOption.isFullGC) {
            size_t heapSize = vm_->GetHeap()->GetLiveObjectSize();
            LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
            heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
            if (progress != nullptr) {
                progress->ReportProgress(0, heapCount);
            }
        }
        snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, dumpOption);
        ASSERT(snapshot != nullptr);
    }
    entryIdMap_->UpdateEntryIdMap(snapshot);
    isProfiling_ = true;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    if (!stream->Good()) {
        FileStream newStream(GenDumpFileName(dumpOption.dumpFormat));
        auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
        GetChunk()->Delete(snapshot);
        return serializerResult;
    }
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
    GetChunk()->Delete(snapshot);
    return serializerResult;
}

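// Raw heap decoding keeps the weak tag of stored references. These two helpers
// strip the weak tag before an address is looked up in the relocation map and
// re-apply it after the address has been rewritten to the decoded copy.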
static uint64_t CheckAndRemoveWeak(JSTaggedValue &value, uint64_t originalAddr)
{
    if (!value.IsWeak()) {
        return originalAddr;
    }
    JSTaggedValue weakValue(originalAddr);
    weakValue.RemoveWeakTag();
    return weakValue.GetRawData();
}

static uint64_t CheckAndAddWeak(JSTaggedValue &value, uint64_t originalAddr)
{
    if (!value.IsWeak()) {
        return originalAddr;
    }
    JSTaggedValue weakValue(originalAddr);
    weakValue.CreateWeakRef();
    return weakValue.GetRawData();
}

static uint64_t VisitMember(ObjectSlot &slot, uint64_t objAddr, CUnorderedSet<uint64_t> &notFoundObj,
                            JSHClass *jsHclass, CUnorderedMap<uint64_t, NewAddr *> &objMap)
{
    auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
    JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
    auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
    originalAddr = CheckAndRemoveWeak(value, originalAddr);
    if (!value.IsHeapObject() || originalAddr == 0) {
        return 0LL;
    }
    auto toItemInfo = objMap.find(originalAddr);
    if (toItemInfo == objMap.end()) {
        LOG_ECMA(ERROR) << "ark raw heap decode visit " << std::hex << objAddr << ", type="
                        << JSHClass::DumpJSType(jsHclass->GetObjectType())
                        << ", not found member old addr=" << originalAddr;
        notFoundObj.insert(reinterpret_cast<uint64_t>(*taggedPointerAddr));
        return 0LL;
    }
    auto newAddr = reinterpret_cast<uint64_t>(toItemInfo->second->Data());
    newAddr = CheckAndAddWeak(value, newAddr);
    slot.Update(reinterpret_cast<TaggedObject *>(newAddr));
    return newAddr;
}

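// Second decoding pass: for every decoded object, patch its hclass word and
// all of its reference slots so that they point at the freshly decoded copies
// recorded in objMap. References held by JSGlobalEnv objects are additionally
// collected into refSetMap for later edge building; slots whose target cannot
// be found are counted and reported.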
CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> VisitObj(CUnorderedMap<uint64_t, NewAddr *> &objMap)
{
    CUnorderedSet<uint64_t> notFoundObj;
    CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> refSetMap; // old addr map to ref set
    auto visitor = [&notFoundObj, &objMap, &refSetMap] (TaggedObject *root, ObjectSlot start,
                                                        ObjectSlot end, VisitObjectArea area) {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        auto jsHclass = root->GetClass();
        auto objAddr = reinterpret_cast<uint64_t>(root);
        CUnorderedSet<uint64_t> *refSet = nullptr;
        if (refSetMap.find(objAddr) != refSetMap.end()) {
            refSet = &refSetMap[objAddr];
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto newAddr = VisitMember(slot, objAddr, notFoundObj, jsHclass, objMap);
            if (jsHclass->IsJsGlobalEnv() && refSet != nullptr && newAddr != 0LL) {
                refSet->insert(newAddr);
            }
        }
    };
    for (auto objInfo : objMap) {
        auto newAddr = objInfo.second->Data();
        auto jsHclassAddr = *reinterpret_cast<uint64_t *>(newAddr);
        auto jsHclassItem = objMap.find(jsHclassAddr);
        if (jsHclassItem == objMap.end()) {
            LOG_ECMA(ERROR) << "ark raw heap decode hclass not find jsHclassAddr=" << std::hex << jsHclassAddr;
            continue;
        }
        TaggedObject *obj = reinterpret_cast<TaggedObject *>(newAddr);
        *reinterpret_cast<uint64_t *>(newAddr) = reinterpret_cast<uint64_t>(jsHclassItem->second->Data());
        auto jsHclass = reinterpret_cast<JSHClass *>(jsHclassItem->second->Data());
        if (jsHclass->IsString()) {
            continue;
        }
        if (jsHclass->IsJsGlobalEnv()) {
            refSetMap.emplace(reinterpret_cast<uint64_t>(newAddr), CUnorderedSet<uint64_t>());
        }
        ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, jsHclass, visitor);
    }
    if (notFoundObj.size() > 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode visit obj: not found obj num=" << notFoundObj.size();
    }
    return refSetMap;
}

static uint64_t GetFileSize(std::string &inputFilePath)
{
    if (inputFilePath.empty()) {
        return 0;
    }
    struct stat fileInfo;
    if (stat(inputFilePath.c_str(), &fileInfo) == 0) {
        return fileInfo.st_size;
    }
    return 0;
}

bool ReadFileAtOffset(std::ifstream &file, uint32_t offset, char *buf, uint32_t size)
{
    if (buf == nullptr) {
        LOG_ECMA(ERROR) << "ark raw heap decode file buf is nullptr";
        return false;
    }
    if (!file.is_open()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file not open";
        return false;
    }
    file.clear();
    if (!file.seekg(offset)) {
        LOG_ECMA(ERROR) << "ark raw heap decode file set offset failed, offset=" << offset;
        return false;
    }
    if (file.read(buf, size).fail()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file read failed, offset=" << offset;
        return false;
    }
    return true;
}

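// Reads the object sections of a rawheap file. sections[] holds
// (offset, size) pairs; pairs starting at index 4 describe object sections.
// Each object section begins with {obj_num, sizeof(AddrTableItem)}, followed
// by obj_num AddrTableItem headers and then the raw object contents. The
// result maps each object's original address to its decoded copy (NewAddr).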
CUnorderedMap<uint64_t, NewAddr *> DecodeMemObj(std::ifstream &file, CVector<uint32_t> &sections)
{
    CUnorderedMap<uint64_t, NewAddr *> objMap; // old addr map to new obj
    uint32_t heapTotalSize = 0;
    uint32_t objTotalNum = 0;
    for (uint32_t sec = 4; sec + 1 < sections.size(); sec += 2) { // 2 : step is 2
        uint32_t offset = sections[sec];
        uint32_t secHead[2];
        if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
            LOG_ECMA(ERROR) << "ark raw heap decode read obj section failed, sec=" << sec << ", offset="
                            << offset << ", size=" << sections[sec + 1];
            return objMap;
        }
        LOG_ECMA(INFO) << "ark raw heap decode read obj section, sec=" << sec << ", offset=" << offset
                        << ", size=" << sections[sec + 1] << ", obj num=" << secHead[0];
        auto tbSize = secHead[0] * sizeof(AddrTableItem);
        if (secHead[1] != sizeof(AddrTableItem) || tbSize == 0 || tbSize > MAX_OBJ_SIZE) {
            LOG_ECMA(ERROR) << "ark raw heap decode check obj table section=" << sections[sec] << ", head size="
                            << sizeof(AddrTableItem) << ", but=" << secHead[1] << " or error table size=" << tbSize;
            continue;
        }
        CVector<char> objTabBuf(tbSize);
        file.read(objTabBuf.data(), tbSize);
        auto objTab = reinterpret_cast<AddrTableItem *>(objTabBuf.data());
        offset += sizeof(secHead);
        objTotalNum += secHead[0];
        for (uint32_t i = 0; i < secHead[0]; i++) {
            heapTotalSize += objTab[i].objSize;
            auto actSize = i + 1 < secHead[0] ? objTab[i + 1].offset - objTab[i].offset :
                           sections[sec + 1] - objTab[i].offset - sizeof(secHead);
            if (actSize != objTab[i].objSize && actSize != sizeof(uint64_t)) {
                auto tabOffset = offset + i * sizeof(AddrTableItem);
                LOG_ECMA(ERROR) << "ark raw heap decode check obj size i=" << i << std::hex << ", offset=" << tabOffset
                                << ", addr=" << objTab[i].addr << ", size=" << objTab[i].objSize << " but=" << actSize;
                continue;
            }
            objMap.emplace(objTab[i].addr, new NewAddr(actSize, objTab[i].objSize));
            auto result = ReadFileAtOffset(file, offset + objTab[i].offset, objMap[objTab[i].addr]->Data(), actSize);
            if (!result) {
                LOG_ECMA(ERROR) << "ark raw heap decode read failed, i=" << i << ", base offset=" << offset
                                << ", obj addr=" << objTab[i].addr << ", read size=" << actSize;
                return objMap;
            }
        }
    }
    LOG_ECMA(INFO) << "ark raw heap decode read obj, num=" << objTotalNum << ", size=" << heapTotalSize;
    return objMap;
}

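// Reads the string table section. After an 8-byte head whose first word is the
// number of records, each record is laid out as
// {u32 strLen, u32 objNum, u64 objAddr[objNum], NUL-terminated chars}: the
// characters start (objNum + 1) * 8 bytes into the record. Returns a map from
// object address to the interned CString in strTable.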
CUnorderedMap<uint64_t, CString *> DecodeStrTable(StringHashMap *strTable, std::ifstream &file,
                                                  uint32_t offset, uint32_t secSize)
{
    uint32_t secHead[2];
    if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
        LOG_ECMA(ERROR) << "ark raw heap decode read str table failed, offset=" << offset << ", size=" << secSize;
        return CUnorderedMap<uint64_t, CString *>(0);
    }
    uint32_t byteNum = secSize - sizeof(secHead);
    char *charPtr = new char[byteNum];
    file.read(charPtr, byteNum);
    CUnorderedMap<uint64_t, CString *> strTabMap; // old addr map to str id
    uint32_t cnt = 0;
    uint32_t baseOff = 0;
    while (cnt++ < secHead[0]) {
        uint32_t *u32Ptr = reinterpret_cast<uint32_t *>(charPtr + baseOff);
        auto strOffset = (u32Ptr[1] + 1) * sizeof(uint64_t) + baseOff;
        auto getSize = strlen(charPtr + strOffset);
        if (u32Ptr[0] != getSize) {
            LOG_ECMA(ERROR) << cnt << " ark raw heap decode check str size=" << u32Ptr[0] << ", but=" << getSize<<"\n";
        }
        auto strAddr = strTable->GetString(charPtr + strOffset);
        uint32_t num = 0;
        uint64_t *u64Ptr = reinterpret_cast<uint64_t *>(&u32Ptr[2]);
        while (num < u32Ptr[1]) {
            strTabMap[u64Ptr[num]] = strAddr;
            num++;
        }
        baseOff = strOffset + u32Ptr[0] + 1;
    }
    delete[] charPtr;
    LOG_ECMA(INFO) << "ark raw heap decode string table size=" << strTable->GetCapcity();
    return strTabMap;
}

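// Reads the root table section: an 8-byte head {root_num, sizeof(uint64_t)}
// followed by root_num 8-byte object addresses, returned as a set.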
CUnorderedSet<uint64_t> DecodeRootTable(std::ifstream &file, uint32_t offset, uint32_t secSize)
{
    uint32_t secHead[2];
    if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
        LOG_ECMA(ERROR) << "ark raw heap decode read root table failed, offset=" << offset << ", size=" << secSize;
        return CUnorderedSet<uint64_t>(0);
    }
    if (secHead[1] != sizeof(uint64_t)) {
        LOG_ECMA(ERROR) << "ark raw heap decode error root size, need=" << sizeof(uint64_t) << ", but=" << secHead[1];
        return CUnorderedSet<uint64_t>(0);
    }
    auto checkSize = sizeof(uint64_t) * secHead[0] + sizeof(secHead);
    if (secSize != checkSize) {
        LOG_ECMA(ERROR) << "ark raw heap decode check root section size=" << secSize << ", but=" << checkSize;
        return CUnorderedSet<uint64_t>(0);
    }
    CVector<uint64_t> rootVec(secHead[0]);
    file.read(reinterpret_cast<char *>(rootVec.data()), sizeof(uint64_t) * secHead[0]);
    CUnorderedSet<uint64_t> rootSet;
    for (auto addr : rootVec) {
        rootSet.insert(addr);
    }
    LOG_ECMA(INFO) << "ark raw heap decode root obj num=" << rootSet.size();
    return rootSet;
}

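// Reads the section index at the tail of the rawheap file. The last 8 bytes
// are {entry_num, sizeof(uint32_t)}; the entry_num uint32 values immediately
// before them are (offset, size) pairs for every section.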
CVector<uint32_t> GetSectionInfo(std::ifstream &file, uint64_t fileSize)
{
    uint32_t secHead[2];
    uint32_t fileOffset = fileSize - sizeof(uint32_t) * 2; // 2 : last 2 uint32
    file.seekg(fileOffset);
    file.read(reinterpret_cast<char *>(secHead), sizeof(secHead));
    if (secHead[1] != sizeof(uint32_t)) {
        LOG_ECMA(ERROR) << "ark raw heap decode unexpected head, need=" << sizeof(uint32_t) << ", but=" << secHead[1];
        return CVector<uint32_t>(0);
    }
    CVector<uint32_t> secInfo(secHead[0]); // secHead[0]: number of section index entries
    auto secInfoSize = secHead[0] * secHead[1];
    fileOffset -= secInfoSize;
    file.seekg(fileOffset);
    file.read(reinterpret_cast<char *>(secInfo.data()), secInfoSize);
    return secInfo;
}

void ClearObjMem(CUnorderedMap<uint64_t, NewAddr *> &objMap)
{
    for (auto objItem : objMap) {
        delete objItem.second;
    }
    objMap.clear();
}

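// Decodes a rawheap file produced by BinaryDump() and serializes it as a JSON
// .heapsnapshot: section index -> objects -> reference relocation -> roots ->
// string table, then node/edge generation. A minimal usage sketch (the file
// names below are illustrative only):
//
//     std::string in = "/data/log/ark-heap.rawheap";   // hypothetical input path
//     std::string out = "/data/log/";                  // directory: name is generated
//     heapProfiler->GenerateHeapSnapshot(in, out);
//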
bool HeapProfiler::GenerateHeapSnapshot(std::string &inputFilePath, std::string &outputPath)
{
    LOG_ECMA(INFO) << "ark raw heap decode start target=" << outputPath;
    uint64_t fileSize = GetFileSize(inputFilePath);
    if (fileSize == 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode get file size=0";
        return false;
    }
    std::ifstream file(inputFilePath, std::ios::binary);
    if (!file.is_open()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file failed:" << inputFilePath.c_str();
        return false;
    }
    if (fileSize > MAX_FILE_SIZE) {
        LOG_ECMA(ERROR) << "ark raw heap decode get file size > 4GB, unsupported";
        return false;
    }
    CVector<uint32_t> sections = GetSectionInfo(file, fileSize);
    if (sections.size() == 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode not found section data";
        return false;
    }
    auto objMap = DecodeMemObj(file, sections);
    auto refSetMap = VisitObj(objMap);
    auto rootSet = DecodeRootTable(file, sections[0], sections[1]);
    auto strTabMap = DecodeStrTable(GetEcmaStringTable(), file, sections[2], sections[3]);
    file.close();
    DumpSnapShotOption dp;
    auto *snapshot = new HeapSnapshot(vm_, GetEcmaStringTable(), dp, false, entryIdMap_, GetChunk());
    LOG_ECMA(INFO) << "ark raw heap decode generate nodes=" << objMap.size();
    snapshot->GenerateNodeForBinMod(objMap, rootSet, strTabMap);
    rootSet.clear();
    strTabMap.clear();
    LOG_ECMA(INFO) << "ark raw heap decode fill edges=" << objMap.size();
    snapshot->BuildSnapshotForBinMod(objMap, refSetMap);
    refSetMap.clear();
    ClearObjMem(objMap);
    if (outputPath.empty()) {
        outputPath = GenDumpFileName(dp.dumpFormat);
    } else if (outputPath.back() == '/') {
        outputPath += GenDumpFileName(dp.dumpFormat);
    }
    LOG_ECMA(INFO) << "ark raw heap decode serialize file=" << outputPath.c_str();
    FileStream newStream(outputPath);
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
    delete snapshot;
    LOG_ECMA(INFO) << "ark raw heap decode finish";
    return serializerResult;
}

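// Reaps the forked dump process; if it has not exited within DUMP_TIME_OUT
// seconds the child is terminated with SIGTERM.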
[[maybe_unused]] static void WaitProcess(pid_t pid)
{
    time_t startTime = time(nullptr);
    constexpr int DUMP_TIME_OUT = 300;
    constexpr int DEFAULT_SLEEP_TIME = 100000;
    while (true) {
        int status = 0;
        pid_t p = waitpid(pid, &status, WNOHANG);
        if (p < 0 || p == pid) {
            break;
        }
        if (time(nullptr) > startTime + DUMP_TIME_OUT) {
            LOG_GC(ERROR) << "DumpHeapSnapshot kill thread, wait " << DUMP_TIME_OUT << " s";
            kill(pid, SIGTERM);
            break;
        }
        usleep(DEFAULT_SLEEP_TIME);
    }
}

template<typename Callback>
void IterateSharedHeap(Callback &cb)
{
    auto heap = SharedHeap::GetInstance();
    heap->GetOldSpace()->IterateOverObjects(cb);
    heap->GetCompressSpace()->IterateOverObjects(cb);
    heap->GetNonMovableSpace()->IterateOverObjects(cb);
    heap->GetHugeObjectSpace()->IterateOverObjects(cb);
    heap->GetAppSpawnSpace()->IterateOverObjects(cb);
    heap->GetReadOnlySpace()->IterateOverObjects(cb);
}

std::pair<uint64_t, uint64_t> GetHeapCntAndSize(const EcmaVM *vm)
{
    uint64_t cnt = 0;
    uint64_t objectSize = 0;
    auto cb = [&objectSize, &cnt]([[maybe_unused]] TaggedObject *obj) {
        objectSize += obj->GetClass()->SizeFromJSHClass(obj);
        ++cnt;
    };
    vm->GetHeap()->IterateOverObjects(cb, false);
    return std::make_pair(cnt, objectSize);
}

std::pair<uint64_t, uint64_t> GetSharedCntAndSize()
{
    uint64_t cnt = 0;
    uint64_t size = 0;
    auto cb = [&cnt, &size](TaggedObject *obj) {
        cnt++;
        size += obj->GetClass()->SizeFromJSHClass(obj);
    };
    IterateSharedHeap(cb);
    return std::make_pair(cnt, size);
}

static CUnorderedSet<TaggedObject*> GetRootObjects(const EcmaVM *vm)
{
    CUnorderedSet<TaggedObject*> result {};
    HeapRootVisitor visitor;
    uint32_t rootCnt1 = 0;
    RootVisitor rootEdgeBuilder = [&result, &rootCnt1](
        [[maybe_unused]] Root type, ObjectSlot slot) {
        JSTaggedValue value((slot).GetTaggedType());
        if (!value.IsHeapObject()) {
            return;
        }
        ++rootCnt1;
        TaggedObject *root = value.GetTaggedObject();
        result.insert(root);
    };
    RootBaseAndDerivedVisitor rootBaseEdgeBuilder = []
        ([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base, [[maybe_unused]] ObjectSlot derived,
         [[maybe_unused]] uintptr_t baseOldObject) {
    };
    uint32_t rootCnt2 = 0;
    RootRangeVisitor rootRangeEdgeBuilder = [&result, &rootCnt2]([[maybe_unused]] Root type,
        ObjectSlot start, ObjectSlot end) {
        for (ObjectSlot slot = start; slot < end; slot++) {
            JSTaggedValue value((slot).GetTaggedType());
            if (!value.IsHeapObject()) {
                continue;
            }
            ++rootCnt2;
            TaggedObject *root = value.GetTaggedObject();
            result.insert(root);
        }
    };
    visitor.VisitHeapRoots(vm->GetJSThread(), rootEdgeBuilder, rootRangeEdgeBuilder, rootBaseEdgeBuilder);
    SharedModuleManager::GetInstance()->Iterate(rootEdgeBuilder);
    Runtime::GetInstance()->IterateCachedStringRoot(rootRangeEdgeBuilder);
    return result;
}

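// Debug/UT helper: walks every local and shared heap object and checks that
// each reference target is itself part of the collected object set; returns
// the number of referenced objects that could not be found.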
size_t GetNotFoundObj(const EcmaVM *vm)
{
    size_t heapTotalSize = 0;
    CUnorderedSet<TaggedObject*> allHeapObjSet {};
    auto handleObj = [&allHeapObjSet, &heapTotalSize](TaggedObject *obj) {
        allHeapObjSet.insert(obj);
        uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
        heapTotalSize += objSize;
    };
    vm->GetHeap()->IterateOverObjects(handleObj, false);
    vm->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
    IterateSharedHeap(handleObj);
    LOG_ECMA(INFO) << "ark raw heap dump GetNotFound heap count:" << allHeapObjSet.size()
                   << ", heap size=" << heapTotalSize;
    CUnorderedSet<TaggedObject *> notFoundObjSet {};
    auto visitor = [&notFoundObjSet, &allHeapObjSet] ([[maybe_unused]]TaggedObject *root, ObjectSlot start,
                                                      ObjectSlot end, VisitObjectArea area) {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
            JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
            auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
            if (!value.IsHeapObject() || originalAddr == 0) {
                continue;
            }
            if (value.IsWeakForHeapObject()) {
                originalAddr -= 1;
            }
            if (allHeapObjSet.find(reinterpret_cast<TaggedObject *>(originalAddr)) != allHeapObjSet.end()) {
                continue;
            }
            auto obj = reinterpret_cast<TaggedObject *>(*taggedPointerAddr);
            if (notFoundObjSet.find(obj) != notFoundObjSet.end()) {
                continue;
            }
            notFoundObjSet.insert(obj);
        }
    };
    for (auto obj : allHeapObjSet) {
        ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, obj->GetClass(), visitor);
    }
    LOG_ECMA(INFO) << "ark raw heap dump GetNotFound not found count:" << notFoundObjSet.size();
    return notFoundObjSet.size();
}

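// Copies the raw memory of every object listed in objTable into buffers of
// PER_GROUP_MEM_SIZE, filling in each AddrTableItem's objSize and its
// section-relative offset. For string objects only the first 8-byte word (the
// hclass pointer) is preserved and the rest of the payload is written as
// zeros. Returns the total size of the copied objects.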
uint32_t HeapProfiler::CopyObjectMem2Buf(char *objTable, uint32_t objNum,
                                         CVector<std::pair<char *, uint32_t>> &memBufMap)
{
    char *currMemBuf = nullptr;
    auto currSize = 0;
    uint32_t totalSize = 0;
    uint32_t curOffset = objNum * sizeof(AddrTableItem);
    auto objHeaders = reinterpret_cast<AddrTableItem *>(objTable);
    for (uint32_t j = 0; j < objNum; ++j) {
        auto obj = reinterpret_cast<TaggedObject *>(objHeaders[j].addr);
        JSTaggedValue value(obj);
        uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
        totalSize += objSize;
        if (currSize + objSize > PER_GROUP_MEM_SIZE || currMemBuf == nullptr) {
            if (currMemBuf != nullptr) {
                memBufMap.push_back({currMemBuf, currSize});
            }
            currSize = 0;
            currMemBuf = chunk_.NewArray<char>(objSize > PER_GROUP_MEM_SIZE ? objSize : PER_GROUP_MEM_SIZE);
        }
        objHeaders[j].objSize = objSize;
        objHeaders[j].offset = curOffset;
        int32_t ret;
        if (value.IsString()) {
            CVector<uint64_t> strTmp(objSize / sizeof(uint64_t), 0);
            strTmp[0] = *reinterpret_cast<uint64_t *>(objHeaders[j].addr);
            ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(strTmp.data()), objSize);
        } else {
            ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(objHeaders[j].addr), objSize);
        }
        if (ret != 0) {
            LOG_ECMA(ERROR) << "ark raw heap dump CopyObjectMem memcpy_s failed, currSize="
                            << currSize << ",objSize=" << objSize << ",addr=" << objHeaders[j].addr;
            return totalSize;
        }
        curOffset += objSize;
        currSize += objSize;
    }
    if (currSize > 0) {
        memBufMap.push_back({currMemBuf, currSize});
    } else if (currMemBuf != nullptr) {
        chunk_.Delete<char>(currMemBuf);
    }
    return totalSize;
}

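// First dump pass: iterates every local and shared heap object, assigns (or
// reuses) a node id via entryIdMap_, and fills AddrTableItem headers into
// PER_GROUP_MEM_SIZE buffers holding up to HEAD_NUM_PER_GROUP entries each.
// headerMap maps each header buffer to the number of entries it holds;
// strIdMap collects the object addresses attached to every generated string
// id. Returns the total number of objects recorded.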
uint32_t HeapProfiler::GenObjTable(CUnorderedMap<char *, uint32_t> &headerMap, HeapSnapshot *snapshot,
                                   CUnorderedMap<uint64_t, CVector<uint64_t>> &strIdMap)
{
    char *currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
    uint32_t index = 0;
    uint32_t objNum = 0;
    auto table = reinterpret_cast<AddrTableItem *>(currBuf);
    auto handleObj = [&index, &table, &objNum, &headerMap, &currBuf, &snapshot, &strIdMap, this](TaggedObject *obj) {
        JSTaggedValue value(obj);
        auto taggedType = value.GetRawData();
        auto [exist, id] = entryIdMap_->FindId(taggedType);
        if (!exist) {
            entryIdMap_->InsertId(taggedType, id);
        }
        table[index].addr = reinterpret_cast<uint64_t>(obj);
        table[index].id = id;
        auto strId = snapshot->GenerateStringId(obj);
        if (strId != 1) { // 1 : invalid str id
            if (strIdMap.find(strId) == strIdMap.end()) {
                strIdMap.emplace(strId, CVector<uint64_t>());
            }
            strIdMap[strId].push_back(table[index].addr);
        }
        index++;
        if (index == HEAD_NUM_PER_GROUP) {
            headerMap.emplace(currBuf, index);
            objNum += HEAD_NUM_PER_GROUP;
            index = 0;
            currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
            table = reinterpret_cast<AddrTableItem *>(currBuf);
        }
    };
    vm_->GetHeap()->IterateOverObjects(handleObj, false);
    vm_->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
    IterateSharedHeap(handleObj);
    objNum += index;
    if (index != 0) {
        headerMap.emplace(currBuf, index);
    } else {
        chunk_.Delete<char>(currBuf);
    }
    return objNum;
}

// 4 byte: root_num
// 4 byte: unit size = sizeof(addr), 8 byte here
// {8 byte: root obj addr} * root_num
uint32_t HeapProfiler::GenRootTable(Stream *stream)
{
    auto roots = GetRootObjects(vm_);
    auto rootSecHeadSize = 8; // 8 : root num + unit size
    auto rootSecSize = roots.size() * (sizeof(TaggedObject *)) + rootSecHeadSize;
    auto memBuf = chunk_.NewArray<char>(rootSecSize);
    uint32_t *rootHeader = reinterpret_cast<uint32_t *>(memBuf);
    uint64_t *rootBuf = reinterpret_cast<uint64_t *>(memBuf + rootSecHeadSize); // 8 : root addr start offset
    rootHeader[0] = roots.size(); // 0: root num
    rootHeader[1] = sizeof(TaggedObject *); // 1: unit size
    auto currInd = 0;
    for (auto root : roots) {
        rootBuf[currInd++] = reinterpret_cast<uint64_t>(root);
    }
    LOG_ECMA(INFO) << "ark raw heap dump GenRootTable root cnt=" << roots.size();
    stream->WriteBinBlock(memBuf, rootSecSize);
    chunk_.Delete<char>(memBuf);
    return rootSecSize;
}


// 4 byte: obj_num
// 4 byte: unit size = sizeof(AddrTableItem)
// {AddrTableItem} * obj_num
// {obj contents} * obj_num
uint32_t HeapProfiler::WriteToBinFile(Stream *stream, char *objTab, uint32_t objNum,
                                      CVector<std::pair<char *, uint32_t>> &memBuf)
{
    uint32_t secHeader[] = {objNum, sizeof(AddrTableItem)};
    uint32_t secTotalSize = sizeof(secHeader);
    stream->WriteBinBlock(reinterpret_cast<char *>(secHeader), secTotalSize);
    uint32_t headerSize = objNum * sizeof(AddrTableItem);
    secTotalSize += headerSize;
    stream->WriteBinBlock(objTab, headerSize); // write obj header
    chunk_.Delete<char>(objTab);
    for (auto memItem : memBuf) {
        stream->WriteBinBlock(memItem.first, memItem.second);
        secTotalSize += memItem.second;
        chunk_.Delete<char>(memItem.first);
    }
    return secTotalSize;
}

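// Writes the string table section and one object section per header buffer
// produced by GenObjTable. Each buffer's object memory is copied on its own
// thread (joined before the next one is started), then every finished buffer
// is appended to the stream; fileOffset and secIndexVec accumulate the
// (offset, size) pairs for the trailing section index.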
bool HeapProfiler::DumpRawHeap(Stream *stream, uint32_t &fileOffset, CVector<uint32_t> &secIndexVec)
{
    CUnorderedMap<char *, uint32_t> objTabMap; // buf map table num
    CUnorderedMap<uint64_t, CVector<uint64_t>> strIdMapObjVec; // string id map to objs vector
    DumpSnapShotOption op;
    auto snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), op, false, entryIdMap_, GetChunk());
    uint32_t objTotalNum = GenObjTable(objTabMap, snapshot, strIdMapObjVec);
    LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap totalObjNumber=" << objTotalNum;
    CVector<CVector<std::pair<char *, uint32_t>>> allMemBuf(objTabMap.size(), CVector<std::pair<char *, uint32_t>>());
    CVector<std::thread> threadsVec;
    CVector<char *> objTabVec(objTabMap.size());
    uint32_t index = 0;
    LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap start to copy, thread num=" << objTabMap.size();
    for (auto tableItem : objTabMap) {
        auto tdCb = [this, &tableItem, &allMemBuf, &index] () {
            CopyObjectMem2Buf(tableItem.first, tableItem.second, allMemBuf[index]);
        };
        threadsVec.emplace_back(tdCb);
        objTabVec[index] = tableItem.first;
        threadsVec[index].join();
        ++index;
    }
    LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write string, num=" << strIdMapObjVec.size();
    secIndexVec.push_back(fileOffset); // string table section offset
    auto size = HeapSnapshotJSONSerializer::DumpStringTable(GetEcmaStringTable(), stream, strIdMapObjVec);
    secIndexVec.push_back(size); // string table section size
    GetChunk()->Delete(snapshot);
    fileOffset += size;
    strIdMapObjVec.clear();
    uint32_t finCnt = 0;
    LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write obj, offset=" << fileOffset;
    while (finCnt < threadsVec.size()) {
        for (index = 0; index < threadsVec.size(); ++index) {
            if (threadsVec[index].joinable()) { // thread not finished
                continue;
            }
            ++finCnt;
            secIndexVec.push_back(fileOffset); // current section offset
            auto objNum = objTabMap[objTabVec[index]];
            auto currSecSize = WriteToBinFile(stream, objTabVec[index], objNum, allMemBuf[index]);
            LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write offset=" << fileOffset << ", size=" << currSecSize;
            secIndexVec.push_back(currSecSize); // current section size
            fileOffset += currSecSize;
        }
    }
    return true;
}

//  * 8 byte: version id
//  * root table section
//  * string table section
//  * {heap section / share heap section} * thread_num
//  * 4 byte: root table section offset
//  * 4 byte: root table section size
//  * 4 byte: string table section offset
//  * 4 byte: string table section size
//  * {
//  * 4 byte: obj section offset
//  * 4 byte: obj section size
//  * } * thread_num
//  * 4 byte: number of section index entries (offsets and sizes)
//  * 4 byte: section index unit size, 4 byte here
bool HeapProfiler::BinaryDump(Stream *stream, [[maybe_unused]] const DumpSnapShotOption &dumpOption)
{
    char versionID[VERSION_ID_SIZE] = { 0 };
    LOG_ECMA(INFO) << "ark raw heap dump start, version is: " << versionID;
    stream->WriteBinBlock(versionID, VERSION_ID_SIZE);
    CQueue<CVector<TaggedObject *>> needStrObjQue;
    // a vector to index all sections, [offset, section_size, offset, section_size, ...]
    CVector<uint32_t> secIndexVec(2); // 2 : section head size
    uint32_t fileOffset = VERSION_ID_SIZE;
    secIndexVec[0] = fileOffset;
    LOG_ECMA(INFO) << "ark raw heap dump GenRootTable";
    auto rootSectionSize = GenRootTable(stream);
    secIndexVec[1] = rootSectionSize; // root section size
    fileOffset += rootSectionSize; // advance past root section
    DumpRawHeap(stream, fileOffset, secIndexVec);
    secIndexVec.push_back(secIndexVec.size()); // number of section index entries
    secIndexVec.push_back(sizeof(uint32_t)); // last 4 bytes: index unit size
    stream->WriteBinBlock(reinterpret_cast<char *>(secIndexVec.data()), secIndexVec.size() * sizeof(uint32_t));
#ifdef OHOS_UNIT_TEST
    LOG_ECMA(INFO) << "ark raw heap dump UT check obj self-contained";
    size_t ret = GetNotFoundObj(vm_);
    return ret == 0;
#else
    LOG_ECMA(INFO) << "ark raw heap dump finished num=" << secIndexVec.size();
    return true;
#endif
}

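// Refreshes entryIdMap_ so that it only contains entries for objects that are
// currently live in the shared heap and the local heap, keeping their existing
// ids (or assigning fresh ones for newly seen addresses).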
void HeapProfiler::FillIdMap()
{
    EntryIdMap* newEntryIdMap = GetChunk()->New<EntryIdMap>();
    // Iterate SharedHeap Object
    SharedHeap* sHeap = SharedHeap::GetInstance();
    if (sHeap != nullptr) {
        sHeap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
            JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
            newEntryIdMap->InsertId(addr, sequenceId);
        });
    }

    // Iterate LocalHeap Object
    auto heap = vm_->GetHeap();
    if (heap != nullptr) {
        heap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
            JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
            auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
            newEntryIdMap->InsertId(addr, sequenceId);
        });
    }

    // copy entryIdMap
    CUnorderedMap<JSTaggedType, NodeId>* idMap = entryIdMap_->GetIdMap();
    CUnorderedMap<JSTaggedType, NodeId>* newIdMap = newEntryIdMap->GetIdMap();
    *idMap = *newIdMap;

    GetChunk()->Delete(newEntryIdMap);
}

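// Entry point for a full snapshot dump. The VM is suspended, TLABs are filled
// and (optionally) a full GC is run; an OOM dump is written in-process in the
// binary raw heap format, otherwise the process is forked and the child
// serializes the JSON snapshot while the parent waits, either synchronously or
// on a detached thread.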
bool HeapProfiler::DumpHeapSnapshot(Stream *stream, const DumpSnapShotOption &dumpOption, Progress *progress)
{
    bool res = false;
    base::BlockHookScope blockScope;
    ThreadManagedScope managedScope(vm_->GetJSThread());
    pid_t pid = -1;
    {
        if (dumpOption.isFullGC) {
            [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
            ASSERT(heapClean);
        }
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread()); // suspend All.
        if (dumpOption.isFullGC) {
            DISALLOW_GARBAGE_COLLECTION;
            const_cast<Heap *>(vm_->GetHeap())->Prepare();
            SharedHeap::GetInstance()->Prepare(true);
        }
        Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
            ASSERT(!thread->IsInRunningState());
            const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
        });
        if (dumpOption.isBeforeFill) {
            FillIdMap();
        }
        if (dumpOption.isDumpOOM) {
            res = BinaryDump(stream, dumpOption);
            stream->EndOfStream();
            return res;
        }
        // fork
        if ((pid = ForkBySyscall()) < 0) {
            LOG_ECMA(ERROR) << "DumpHeapSnapshot fork failed!";
            return false;
        }
        if (pid == 0) {
            vm_->GetAssociatedJSThread()->EnableCrossThreadExecution();
            prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("dump_process"), 0, 0, 0);
            res = DoDump(stream, progress, dumpOption);
            _exit(0);
        }
    }
    if (pid != 0) {
        if (dumpOption.isSync) {
            WaitProcess(pid);
        } else {
            std::thread thread(&WaitProcess, pid);
            thread.detach();
        }
        stream->EndOfStream();
    }
    isProfiling_ = true;
    return res;
}

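// Starts allocation tracking: after an old-space and shared GC, a REAL_TIME
// snapshot is taken and handed to a HeapTracker that samples heap statistics
// every timeInterval, optionally on a dedicated tracing thread.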
bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
                                     bool traceAllocation, bool newThread)
{
    vm_->CollectGarbage(TriggerGCType::OLD_GC);
    ForceSharedGC();
    SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
    DumpSnapShotOption dumpOption;
    dumpOption.isVmMode = isVmMode;
    dumpOption.isPrivate = false;
    dumpOption.captureNumericValue = false;
    HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, dumpOption, traceAllocation);
    if (snapshot == nullptr) {
        return false;
    }
    isProfiling_ = true;
    UpdateHeapObjects(snapshot);
    heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
    const_cast<EcmaVM *>(vm_)->StartHeapTracking();
    if (newThread) {
        heapTracker_->StartTracing();
    }

    return true;
}

bool HeapProfiler::UpdateHeapTracking(Stream *stream)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    {
        vm_->CollectGarbage(TriggerGCType::OLD_GC);
        ForceSharedGC();
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
        snapshot->RecordSampleTime();
        UpdateHeapObjects(snapshot);
    }

    if (stream != nullptr) {
        snapshot->PushHeapStat(stream);
    }

    return true;
}

bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());

    const_cast<EcmaVM *>(vm_)->StopHeapTracking();
    if (newThread) {
        heapTracker_->StopTracing();
    }

    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    if (progress != nullptr) {
        progress->ReportProgress(0, heapCount);
    }
    {
        ForceSharedGC();
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
        SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
        snapshot->FinishSnapshot();
    }

    isProfiling_ = false;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
}

std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
{
    CString filename("hprof_");
    switch (dumpFormat) {
        case DumpFormat::JSON:
            filename.append(GetTimeStamp());
            break;
        case DumpFormat::BINARY:
            filename.append("unimplemented");
            break;
        case DumpFormat::OTHER:
            filename.append("unimplemented");
            break;
        default:
            filename.append("unimplemented");
            break;
    }
    filename.append(".heapsnapshot");
    return ConvertToStdString(filename);
}

CString HeapProfiler::GetTimeStamp()
{
    std::time_t timeSource = std::time(nullptr);
    struct tm tm {};
    struct tm *timeData = localtime_r(&timeSource, &tm);
    if (timeData == nullptr) {
        LOG_FULL(FATAL) << "localtime_r failed";
        UNREACHABLE();
    }
    CString stamp;
    const int TIME_START = 1900;
    stamp.append(ToCString(timeData->tm_year + TIME_START))
        .append("-")
        .append(ToCString(timeData->tm_mon + 1))
        .append("-")
        .append(ToCString(timeData->tm_mday))
        .append("_")
        .append(ToCString(timeData->tm_hour))
        .append("-")
        .append(ToCString(timeData->tm_min))
        .append("-")
        .append(ToCString(timeData->tm_sec));
    return stamp;
}

bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
{
    if (vm->IsInitialized()) {
        const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
        return true;
    }
    return false;
}

void HeapProfiler::ForceSharedGC()
{
    SharedHeap *sHeap = SharedHeap::GetInstance();
    sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(vm_->GetAssociatedJSThread());
    sHeap->GetSweeper()->WaitAllTaskFinished();
}

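// Allocates a HeapSnapshot from the profiler chunk. ONE_SHOT snapshots are
// built immediately (BuildUp); REAL_TIME snapshots are registered via
// AddSnapshot and prepared for incremental tracking.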
HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, const DumpSnapShotOption &dumpOption,
                                             bool traceAllocation)
{
    LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
    if (dumpOption.isFullGC) {
        DISALLOW_GARBAGE_COLLECTION;
        const_cast<Heap *>(vm_->GetHeap())->Prepare();
    }
    switch (sampleType) {
        case SampleType::ONE_SHOT: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
                                                           traceAllocation, entryIdMap_, GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            snapshot->BuildUp(dumpOption.isSimplify);
            return snapshot;
        }
        case SampleType::REAL_TIME: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
                                                           traceAllocation, entryIdMap_, GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            AddSnapshot(snapshot);
            snapshot->PrepareSnapshot();
            return snapshot;
        }
        default:
            return nullptr;
    }
}

void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
{
    if (hprofs_.size() >= MAX_NUM_HPROF) {
        ClearSnapshot();
    }
    ASSERT(snapshot != nullptr);
    hprofs_.emplace_back(snapshot);
}

void HeapProfiler::ClearSnapshot()
{
    for (auto *snapshot : hprofs_) {
        GetChunk()->Delete(snapshot);
    }
    hprofs_.clear();
}

bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
{
    if (heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
        return false;
    }
    heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
                                                   samplingInterval, stackDepth);
    return true;
}

void HeapProfiler::StopHeapSampling()
{
    heapSampling_.reset();
}

const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
{
    if (!heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Heap sampling was not started, please start it first.";
        return nullptr;
    }
    return heapSampling_->GetAllocationProfile();
}
}  // namespace panda::ecmascript