• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2021 Huawei Device Co., Ltd.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <sys/wait.h>
16 #include <sys/prctl.h>
17 #include "ecmascript/dfx/hprof/heap_profiler.h"
18 
19 #include "ecmascript/checkpoint/thread_state_transition.h"
20 #include "ecmascript/dfx/hprof/heap_snapshot.h"
21 #include "ecmascript/mem/heap-inl.h"
22 #include "ecmascript/mem/shared_heap/shared_concurrent_sweeper.h"
23 #include "ecmascript/base/block_hook_scope.h"
24 #include "ecmascript/dfx/hprof/heap_root_visitor.h"
25 #include "ecmascript/mem/object_xray.h"
26 #include "ecmascript/platform/backtrace.h"
27 
28 #if defined(ENABLE_DUMP_IN_FAULTLOG)
29 #include "faultloggerd_client.h"
30 #include "heap_profiler.h"
31 #endif
32 
33 namespace panda::ecmascript {
34 
FindId(JSTaggedType addr)35 std::pair<bool, NodeId> EntryIdMap::FindId(JSTaggedType addr)
36 {
37     auto it = idMap_.find(addr);
38     if (it == idMap_.end()) {
39         return std::make_pair(false, GetNextId()); // return nextId if entry not exits
40     } else {
41         return std::make_pair(true, it->second);
42     }
43 }
44 
FindOrInsertNodeId(JSTaggedType addr)45 NodeId EntryIdMap::FindOrInsertNodeId(JSTaggedType addr)
46 {
47     auto it = idMap_.find(addr);
48     if (it != idMap_.end()) {
49         return it->second;
50     }
51     NodeId id = GetNextId();
52     idMap_.emplace(addr, id);
53     return id;
54 }
55 
InsertId(JSTaggedType addr,NodeId id)56 bool EntryIdMap::InsertId(JSTaggedType addr, NodeId id)
57 {
58     auto it = idMap_.find(addr);
59     if (it == idMap_.end()) {
60         idMap_.emplace(addr, id);
61         return true;
62     }
63     idMap_[addr] = id;
64     return false;
65 }
66 
EraseId(JSTaggedType addr)67 bool EntryIdMap::EraseId(JSTaggedType addr)
68 {
69     auto it = idMap_.find(addr);
70     if (it == idMap_.end()) {
71         return false;
72     }
73     idMap_.erase(it);
74     return true;
75 }
76 
Move(JSTaggedType oldAddr,JSTaggedType forwardAddr)77 bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
78 {
79     if (oldAddr == forwardAddr) {
80         return true;
81     }
82     auto it = idMap_.find(oldAddr);
83     if (it != idMap_.end()) {
84         NodeId id = it->second;
85         idMap_.erase(it);
86         idMap_[forwardAddr] = id;
87         return true;
88     }
89     return false;
90 }
91 
UpdateEntryIdMap(HeapSnapshot * snapshot)92 void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
93 {
94     LOG_ECMA(INFO) << "EntryIdMap::UpdateEntryIdMap";
95     if (snapshot == nullptr) {
96         LOG_ECMA(FATAL) << "EntryIdMap::UpdateEntryIdMap:snapshot is nullptr";
97         UNREACHABLE();
98     }
99     auto nodes = snapshot->GetNodes();
100     CUnorderedMap<JSTaggedType, NodeId> newIdMap;
101     for (auto node : *nodes) {
102         auto addr = node->GetAddress();
103         auto it = idMap_.find(addr);
104         if (it != idMap_.end()) {
105             newIdMap.emplace(addr, it->second);
106         }
107     }
108     idMap_.clear();
109     idMap_ = newIdMap;
110 }
111 
// Binds the profiler to `vm`; stringTable_ and chunk_ are built from the
// VM's native allocator, so the profiler must not outlive the VM.
HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
{
    isProfiling_ = false;
    // Chunk-allocated; released in ~HeapProfiler via GetChunk()->Delete.
    entryIdMap_ = GetChunk()->New<EntryIdMap>();
}
117 
// Tears down profiling state: drops cached pandafile name mappings, frees
// retained snapshots, then returns the id map to the chunk allocator.
HeapProfiler::~HeapProfiler()
{
    JSPandaFileManager::GetInstance()->ClearNameMap();
    ClearSnapshot();
    GetChunk()->Delete(entryIdMap_);
}
124 
AllocationEvent(TaggedObject * address,size_t size)125 void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
126 {
127     DISALLOW_GARBAGE_COLLECTION;
128     if (isProfiling_) {
129         // Id will be allocated later while add new node
130         if (heapTracker_ != nullptr) {
131             heapTracker_->AllocationEvent(address, size);
132         }
133     }
134 }
135 
MoveEvent(uintptr_t address,TaggedObject * forwardAddress,size_t size)136 void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
137 {
138     LockHolder lock(mutex_);
139     if (isProfiling_) {
140         entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
141         if (heapTracker_ != nullptr) {
142             heapTracker_->MoveEvent(address, forwardAddress, size);
143         }
144     }
145 }
146 
// Refreshes the snapshot's node set; waits for the shared-heap concurrent
// sweeper first so no object is observed mid-sweep.
void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
{
    SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
    snapshot->UpdateNodes();
}
152 
// Dumps a heap snapshot into the fault logger on OOM. Compiled out entirely
// unless ENABLE_DUMP_IN_FAULTLOG is set. On ARM32 a simplified JSON dump is
// always produced; on other targets a raw (binary) snapshot slot is
// requested when the options ask for one, and a shared-GC-specific dump
// path is taken when `fromSharedGC` is true.
void HeapProfiler::DumpHeapSnapshotForOOM([[maybe_unused]] const DumpSnapShotOption &dumpOption,
                                          [[maybe_unused]] bool fromSharedGC)
{
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    // Write in faultlog for heap leak.
    int32_t fd;
#if defined(PANDA_TARGET_ARM32)
    // 32-bit targets always emit a simplified JSON snapshot.
    DumpSnapShotOption doDumpOption;
    doDumpOption.dumpFormat = DumpFormat::JSON;
    doDumpOption.isFullGC = dumpOption.isFullGC;
    doDumpOption.isSimplify = true;
    doDumpOption.isBeforeFill = false;
    fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd" << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    DumpHeapSnapshot(&stream, doDumpOption);
#else
    // Pick the faultlogger slot matching the requested dump format.
    if (dumpOption.isDumpOOM && dumpOption.dumpFormat == DumpFormat::BINARY) {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_RAW_SNAPSHOT));
    } else {
        fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    }
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd" << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    if (!fromSharedGC) {
        DumpHeapSnapshot(&stream, dumpOption);
    } else {
        DumpHeapSnapshotFromSharedGC(&stream, dumpOption);
    }
#endif
#endif
}
191 
// Produces a binary heap dump during a shared GC: prepares the local and
// shared heaps, fills TLAB bump-pointer gaps on every mutator thread (the
// ASSERT documents that threads are not running at this point), then
// streams the dump and terminates the stream.
void HeapProfiler::DumpHeapSnapshotFromSharedGC(Stream *stream, const DumpSnapShotOption &dumpOption)
{
    base::BlockHookScope blockScope;  // scoped guard from base/block_hook_scope.h, held for the whole dump
    const_cast<Heap*>(vm_->GetHeap())->Prepare();
    SharedHeap::GetInstance()->Prepare(true);
    Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
        ASSERT(!thread->IsInRunningState());
        const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
    });
    BinaryDump(stream, dumpOption);
    stream->EndOfStream();
}
204 
DoDump(Stream * stream,Progress * progress,const DumpSnapShotOption & dumpOption)205 bool HeapProfiler::DoDump(Stream *stream, Progress *progress, const DumpSnapShotOption &dumpOption)
206 {
207     DISALLOW_GARBAGE_COLLECTION;
208     int32_t heapCount = 0;
209     HeapSnapshot *snapshot = nullptr;
210     {
211         if (dumpOption.isFullGC) {
212             size_t heapSize = vm_->GetHeap()->GetLiveObjectSize();
213             LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
214             heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
215             if (progress != nullptr) {
216                 progress->ReportProgress(0, heapCount);
217             }
218         }
219         snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, dumpOption);
220         ASSERT(snapshot != nullptr);
221     }
222     entryIdMap_->UpdateEntryIdMap(snapshot);
223     isProfiling_ = true;
224     if (progress != nullptr) {
225         progress->ReportProgress(heapCount, heapCount);
226     }
227     if (!stream->Good()) {
228         FileStream newStream(GenDumpFileName(dumpOption.dumpFormat));
229         auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
230         GetChunk()->Delete(snapshot);
231         return serializerResult;
232     }
233     auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
234     GetChunk()->Delete(snapshot);
235     return serializerResult;
236 }
237 
CheckAndRemoveWeak(JSTaggedValue & value,uint64_t originalAddr)238 static uint64_t CheckAndRemoveWeak(JSTaggedValue &value, uint64_t originalAddr)
239 {
240     if (!value.IsWeak()) {
241         return originalAddr;
242     }
243     JSTaggedValue weakValue(originalAddr);
244     weakValue.RemoveWeakTag();
245     return weakValue.GetRawData();
246 }
247 
CheckAndAddWeak(JSTaggedValue & value,uint64_t originalAddr)248 static uint64_t CheckAndAddWeak(JSTaggedValue &value, uint64_t originalAddr)
249 {
250     if (!value.IsWeak()) {
251         return originalAddr;
252     }
253     JSTaggedValue weakValue(originalAddr);
254     weakValue.CreateWeakRef();
255     return weakValue.GetRawData();
256 }
257 
// Rewrites one member slot of a decoded object from its old (dump-time)
// address to the address of the re-materialized copy held in `objMap`.
// Weak tags are stripped before the lookup and re-applied afterwards.
// Returns the new (possibly weak-tagged) address, or 0 when the slot does
// not hold a heap object, or when the target is missing from `objMap` (in
// which case the missing address is recorded in `notFoundObj`).
static uint64_t VisitMember(ObjectSlot &slot, uint64_t objAddr, CUnorderedSet<uint64_t> &notFoundObj,
                            JSHClass *jsHclass, CUnorderedMap<uint64_t, NewAddr *> &objMap)
{
    auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
    JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
    auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
    originalAddr = CheckAndRemoveWeak(value, originalAddr);
    if (!value.IsHeapObject() || originalAddr == 0) {
        return 0LL; // non-reference slot: nothing to relocate
    }
    auto toItemInfo = objMap.find(originalAddr);
    if (toItemInfo == objMap.end()) {
        LOG_ECMA(ERROR) << "ark raw heap decode visit " << std::hex << objAddr << ", type="
                        << JSHClass::DumpJSType(jsHclass->GetObjectType())
                        << ", not found member old addr=" << originalAddr;
        notFoundObj.insert(reinterpret_cast<uint64_t>(*taggedPointerAddr));
        return 0LL;
    }
    // Point the slot at the decoded copy, restoring the weak tag if needed.
    auto newAddr = reinterpret_cast<uint64_t>(toItemInfo->second->Data());
    newAddr = CheckAndAddWeak(value, newAddr);
    slot.Update(reinterpret_cast<TaggedObject *>(newAddr));
    return newAddr;
}
281 
// Range visitor used during raw-heap decoding: relocates every member slot
// of a visited object via VisitMember. For JSGlobalEnv objects it also
// collects the relocated member addresses into that object's entry in
// refSetMap_. All three containers are borrowed; they must outlive the
// visitor.
class VisitObjVisitor final : public EcmaObjectRangeVisitor<VisitObjVisitor> {
public:
    explicit VisitObjVisitor(CUnorderedSet<uint64_t> &notFoundObj, CUnorderedMap<uint64_t, NewAddr *> &objMap,
                             CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> &refSetMap)
        : notFoundObj_(notFoundObj), objMap_(objMap), refSetMap_(refSetMap) {}
    ~VisitObjVisitor() = default;

    // Visits the slots [start, end) of `root`. Raw-data and native-pointer
    // areas contain no tagged references and are skipped.
    void VisitObjectRangeImpl(TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area)
    {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        auto jsHclass = root->GetClass();
        auto objAddr = reinterpret_cast<uint64_t>(root);
        CUnorderedSet<uint64_t> *refSet = nullptr;
        if (refSetMap_.find(objAddr) != refSetMap_.end()) {
            refSet = &refSetMap_[objAddr];
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto newAddr = VisitMember(slot, objAddr, notFoundObj_, jsHclass, objMap_);
            // Only global-env objects have a refSet registered; record each
            // successfully relocated member address.
            if (jsHclass->IsJsGlobalEnv() && refSet != nullptr && newAddr != 0LL) {
                refSet->insert(newAddr);
            }
        }
    }
private:
    CUnorderedSet<uint64_t> &notFoundObj_;                        // old addrs with no decoded copy
    CUnorderedMap<uint64_t, NewAddr *> &objMap_;                  // old addr -> decoded object
    CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> &refSetMap_; // global-env addr -> member refs
};
312 
// Fixes up all decoded objects in `objMap`: patches each object's hclass
// word to point at the decoded hclass copy, then relocates every member
// slot via VisitObjVisitor. Strings carry no tagged members and are not
// visited. Returns a map from each JSGlobalEnv object (keyed by its new
// address) to the set of member addresses it references.
CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> VisitObj(CUnorderedMap<uint64_t, NewAddr *> &objMap)
{
    CUnorderedSet<uint64_t> notFoundObj;
    CUnorderedMap<uint64_t, CUnorderedSet<uint64_t>> refSetMap; // global-env new addr map to ref set
    VisitObjVisitor visitor(notFoundObj, objMap, refSetMap);
    for (auto objInfo : objMap) {
        auto newAddr = objInfo.second->Data();
        // The first word of the decoded object is the old hclass address.
        auto jsHclassAddr = *reinterpret_cast<uint64_t *>(newAddr);
        auto jsHclassItem = objMap.find(jsHclassAddr);
        if (jsHclassItem == objMap.end()) {
            LOG_ECMA(ERROR) << "ark raw heap decode hclass not find jsHclassAddr=" << std::hex << jsHclassAddr;
            continue;
        }
        TaggedObject *obj = reinterpret_cast<TaggedObject *>(newAddr);
        // Patch the hclass word to the decoded hclass copy.
        *reinterpret_cast<uint64_t *>(newAddr) = reinterpret_cast<uint64_t>(jsHclassItem->second->Data());
        auto jsHclass = reinterpret_cast<JSHClass *>(jsHclassItem->second->Data());
        if (jsHclass->IsString()) {
            continue; // strings hold no tagged member slots
        }
        if (jsHclass->IsJsGlobalEnv()) {
            refSetMap.emplace(reinterpret_cast<uint64_t>(newAddr), CUnorderedSet<uint64_t>());
        }
        ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, jsHclass, visitor);
    }
    if (notFoundObj.size() > 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode visit obj: not found obj num=" << notFoundObj.size();
    }
    return refSetMap;
}
342 
// Returns the size in bytes of the file at `inputFilePath`, or 0 when the
// path is empty or cannot be stat'ed.
static uint64_t GetFileSize(std::string &inputFilePath)
{
    if (inputFilePath.empty()) {
        return 0;
    }
    struct stat fileInfo {};
    return (stat(inputFilePath.c_str(), &fileInfo) == 0) ? static_cast<uint64_t>(fileInfo.st_size) : 0;
}
354 
// Reads `size` bytes at absolute `offset` from an already-open stream into
// `buf`. Returns false (with an error log) on a null buffer, a stream that
// is not open, a failed seek, or a failed/short read; true otherwise.
bool ReadFileAtOffset(std::ifstream &file, uint32_t offset, char *buf, uint32_t size)
{
    if (buf == nullptr) {
        LOG_ECMA(ERROR) << "ark raw heap decode file buf is nullptr";
        return false;
    }
    if (!file.is_open()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file not open";
        return false;
    }
    file.clear(); // drop eof/fail bits left over from a previous read
    if (!file.seekg(offset)) {
        LOG_ECMA(ERROR) << "ark raw heap decode file set offset failed, offset=" << offset;
        return false;
    }
    if (file.read(buf, size).fail()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file read failed, offset=" << offset;
        return false;
    }
    return true;
}
376 
DecodeMemObj(std::ifstream & file,CVector<uint32_t> & sections)377 CUnorderedMap<uint64_t, NewAddr *> DecodeMemObj(std::ifstream &file, CVector<uint32_t> &sections)
378 {
379     CUnorderedMap<uint64_t, NewAddr *> objMap; // old addr map to new obj
380     uint32_t heapTotalSize = 0;
381     uint32_t objTotalNum = 0;
382     for (uint32_t sec = 4; sec + 1 < sections.size(); sec += 2) { // 2 :step is 2
383         uint32_t offset = sections[sec];
384         uint32_t secHead[2];
385         if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
386             LOG_ECMA(ERROR) << "ark raw heap decode read obj section failed, sec=" << sec << ", offset="
387                             << offset << ", size=" << sections[sec + 1];
388             return objMap;
389         }
390         LOG_ECMA(INFO) << "ark raw heap decode read obj section failed, sec=" << sec << ", offset=" << offset
391                         << ", size=" << sections[sec + 1] << ", obj num=" << secHead[0];
392         auto tbSize = secHead[0] * sizeof(AddrTableItem);
393         if (secHead[1] != sizeof(AddrTableItem) || tbSize == 0 || tbSize > MAX_OBJ_SIZE) {
394             LOG_ECMA(ERROR) << "ark raw heap decode check obj table section=" << sections[sec] << ", head size="
395                             << sizeof(AddrTableItem) << ", but=" << secHead[1] << "or error table size=" << tbSize;
396             continue;
397         }
398         CVector<char> objTabBuf(tbSize);
399         file.read(objTabBuf.data(), tbSize);
400         auto objTab = reinterpret_cast<AddrTableItem *>(objTabBuf.data());
401         offset += sizeof(secHead);
402         objTotalNum += secHead[0];
403         for (uint32_t i = 0; i < secHead[0]; i++) {
404             heapTotalSize += objTab[i].objSize;
405             auto actSize = i + 1 < secHead[0] ? objTab[i + 1].offset - objTab[i].offset :
406                            sections[sec + 1] - objTab[i].offset - sizeof(secHead);
407             if (actSize != objTab[i].objSize && actSize != sizeof(uint64_t)) {
408                 auto tabOffset = offset + i * sizeof(AddrTableItem);
409                 LOG_ECMA(ERROR) << "ark raw heap decode check obj size i=" << i << std::hex << ", offset=" << tabOffset
410                                 << ", addr=" << objTab[i].addr << ", size=" << objTab[i].objSize << " but=" << actSize;
411                 continue;
412             }
413             objMap.emplace(objTab[i].addr, new NewAddr(actSize, objTab[i].objSize));
414             auto result = ReadFileAtOffset(file, offset + objTab[i].offset, objMap[objTab[i].addr]->Data(), actSize);
415             if (!result) {
416                 LOG_ECMA(ERROR) << "ark raw heap decode read failed, i=" << i << ", base offset=" << offset
417                                 << ", obj addr=" << objTab[i].addr << ", read size=" << actSize;
418                 return objMap;
419             }
420         }
421     }
422     LOG_ECMA(INFO) << "ark raw heap decode read obj, num=" << objTotalNum << ", size=" << heapTotalSize;
423     return objMap;
424 }
425 
DecodeStrTable(StringHashMap * strTable,std::ifstream & file,uint32_t offset,uint32_t secSize)426 CUnorderedMap<uint64_t, CString *> DecodeStrTable(StringHashMap *strTable, std::ifstream &file,
427                                                   uint32_t offset, uint32_t secSize)
428 {
429     uint32_t secHead[2];
430     if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
431         LOG_ECMA(ERROR) << "ark raw heap decode read str table failed, offset=" << offset << ", size=" << secSize;
432         return CUnorderedMap<uint64_t, CString *>(0);
433     }
434     uint32_t byteNum = secSize - sizeof(secHead);
435     char *charPtr = new char[byteNum];
436     file.read(charPtr, byteNum);
437     CUnorderedMap<uint64_t, CString *> strTabMap; // old addr map to str id
438     uint32_t cnt = 0;
439     uint32_t baseOff = 0;
440     while (cnt++ < secHead[0]) {
441         uint32_t *u32Ptr = reinterpret_cast<uint32_t *>(charPtr + baseOff);
442         auto strOffset = (u32Ptr[1] + 1) * sizeof(uint64_t) + baseOff;
443         auto getSize = strlen(charPtr + strOffset);
444         if (u32Ptr[0] != getSize) {
445             LOG_ECMA(ERROR) << cnt << " ark raw heap decode check str size=" << u32Ptr[0] << ", but=" << getSize<<"\n";
446         }
447         auto strAddr = strTable->GetString(charPtr + strOffset);
448         uint32_t num = 0;
449         uint64_t *u64Ptr = reinterpret_cast<uint64_t *>(&u32Ptr[2]);
450         while (num < u32Ptr[1]) {
451             strTabMap[u64Ptr[num]] = strAddr;
452             num++;
453         }
454         baseOff = strOffset + u32Ptr[0] + 1;
455     }
456     delete[] charPtr;
457     LOG_ECMA(INFO) << "ark raw heap decode string table size=" << strTable->GetCapcity();
458     return strTabMap;
459 }
460 
DecodeRootTable(std::ifstream & file,uint32_t offset,uint32_t secSize)461 CUnorderedSet<uint64_t> DecodeRootTable(std::ifstream &file, uint32_t offset, uint32_t secSize)
462 {
463     uint32_t secHead[2];
464     if (!ReadFileAtOffset(file, offset, reinterpret_cast<char *>(secHead), sizeof(secHead))) {
465         LOG_ECMA(ERROR) << "ark raw heap decode read root table failed, offset=" << offset << ", size=" << secSize;
466         return CUnorderedSet<uint64_t>(0);
467     }
468     if (secHead[1] != sizeof(uint64_t)) {
469         LOG_ECMA(ERROR) << "ark raw heap decode error root size, need=" << sizeof(uint32_t) << ", but=" << secHead[0];
470         return CUnorderedSet<uint64_t>(0);
471     }
472     auto checkSize = sizeof(uint64_t) * secHead[0] + sizeof(secHead);
473     if (secSize != checkSize) {
474         LOG_ECMA(ERROR) << "ark raw heap decode check root section size=" << secSize << ", but=" << checkSize;
475         return CUnorderedSet<uint64_t>(0);
476     }
477     CVector<uint64_t> rootVec(secHead[0]);
478     file.read(reinterpret_cast<char *>(rootVec.data()), sizeof(uint64_t) * secHead[0]);
479     CUnorderedSet<uint64_t> rootSet;
480     for (auto addr : rootVec) {
481         rootSet.insert(addr);
482     }
483     LOG_ECMA(INFO) << "ark raw heap decode root obj num=" << rootSet.size();
484     return rootSet;
485 }
486 
GetSectionInfo(std::ifstream & file,uint64_t fileSize)487 CVector<uint32_t> GetSectionInfo(std::ifstream &file, uint64_t fileSize)
488 {
489     uint32_t secHead[2];
490     uint32_t fileOffset = fileSize - sizeof(uint32_t) * 2; // 2 : last 2 uint32
491     file.seekg(fileOffset);
492     file.read(reinterpret_cast<char *>(secHead), sizeof(secHead));
493     if (secHead[1] != sizeof(uint32_t)) {
494         LOG_ECMA(ERROR) << "ark raw heap decode unexpect head, need=" << sizeof(uint32_t) << ", but=" << secHead[0];
495         return CVector<uint32_t>(0);
496     }
497     CVector<uint32_t> secInfo(secHead[0]); // last 4 byte is section num
498     auto secInfoSize = secHead[0] * secHead[1];
499     fileOffset -= secInfoSize;
500     file.seekg(fileOffset);
501     file.read(reinterpret_cast<char *>(secInfo.data()), secInfoSize);
502     return secInfo;
503 }
504 
ClearObjMem(CUnorderedMap<uint64_t,NewAddr * > & objMap)505 void ClearObjMem(CUnorderedMap<uint64_t, NewAddr *> &objMap)
506 {
507     for (auto objItem : objMap) {
508         delete objItem.second;
509     }
510     objMap.clear();
511 }
512 
// Validates that the file at `inputFilePath` exists, is non-empty, and is
// no larger than MAX_FILE_SIZE. On success stores the size in `fileSize`
// and returns true; otherwise logs the reason and returns false (the value
// written to `fileSize` is still the stat'ed size).
static bool GetValidFileSize(std::string &inputFilePath, uint64_t &fileSize)
{
    fileSize = GetFileSize(inputFilePath);
    if (fileSize == 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode get file size=0";
        return false;
    }
    if (fileSize > MAX_FILE_SIZE) {
        LOG_ECMA(ERROR) << "ark raw heap decode get file size > 4GB, unsupported";
        return false;
    }
    return true;
}
526 
// Converts a raw (binary) heap dump file into a JSON heap snapshot.
// Pipeline: resolve and size-check the input, read the section table,
// decode raw objects, relocate references, decode roots and the string
// table, build snapshot nodes/edges, then serialize to `outputPath` (a
// generated file name is appended/used when the path is empty or ends
// with '/'). Returns the serializer's result; false on any decode failure.
bool HeapProfiler::GenerateHeapSnapshot(std::string &inputFilePath, std::string &outputPath)
{
    LOG_ECMA(INFO) << "ark raw heap decode start target=" << outputPath;
    std::string realPath;
    if (!RealPath(inputFilePath, realPath)) {
        LOG_ECMA(ERROR) << "get real path failed:" << inputFilePath;
        return false;
    }
    uint64_t fileSize;
    if (!GetValidFileSize(realPath, fileSize)) {
        return false;
    }
    std::ifstream file(realPath, std::ios::binary);
    if (!file.is_open()) {
        LOG_ECMA(ERROR) << "ark raw heap decode file failed:" << realPath;
        return false;
    }
    CVector<uint32_t> sections = GetSectionInfo(file, fileSize);
    if (sections.size() == 0) {
        LOG_ECMA(ERROR) << "ark raw heap decode not found section data";
        return false;
    }
    // sections[0..1] = root table (offset, size); sections[2..3] = string
    // table; object sections start at index 4 (see DecodeMemObj).
    auto objMap = DecodeMemObj(file, sections);
    auto refSetMap = VisitObj(objMap);
    auto rootSet = DecodeRootTable(file, sections[0], sections[1]);
    auto strTabMap = DecodeStrTable(GetEcmaStringTable(), file, sections[2], sections[3]);
    file.close();
    DumpSnapShotOption dp;
    auto *snapshot = new HeapSnapshot(vm_, GetEcmaStringTable(), dp, false, entryIdMap_);
    LOG_ECMA(INFO) << "ark raw heap decode generate nodes=" << objMap.size();
    snapshot->GenerateNodeForBinMod(objMap, rootSet, strTabMap);
    rootSet.clear();
    strTabMap.clear();
    LOG_ECMA(INFO) << "ark raw heap decode fill edges=" << objMap.size();
    snapshot->BuildSnapshotForBinMod(objMap, refSetMap);
    refSetMap.clear();
    ClearObjMem(objMap); // decoded buffers are no longer needed after the snapshot is built
    if (outputPath.empty()) {
        outputPath = GenDumpFileName(dp.dumpFormat);
    } else if (outputPath.back() == '/') {
        outputPath += GenDumpFileName(dp.dumpFormat);
    }
    LOG_ECMA(INFO) << "ark raw heap decode serialize file=" << outputPath.c_str();
    FileStream newStream(outputPath);
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
    delete snapshot;
    LOG_ECMA(INFO) << "ark raw heap decode finish";
    return serializerResult;
}
576 
WaitProcess(pid_t pid,const std::function<void (uint8_t)> & callback)577 [[maybe_unused]]static void WaitProcess(pid_t pid, const std::function<void(uint8_t)> &callback)
578 {
579     time_t startTime = time(nullptr);
580     constexpr int DUMP_TIME_OUT = 300;
581     constexpr int DEFAULT_SLEEP_TIME = 100000;
582     while (true) {
583         int status = 0;
584         pid_t p = waitpid(pid, &status, WNOHANG);
585         if (p < 0) {
586             LOG_GC(ERROR) << "DumpHeapSnapshot wait failed ";
587             if (callback) {
588                 callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::FAILED_TO_WAIT));
589             }
590             return;
591         }
592         if (p == pid) {
593             if (callback) {
594                 callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::SUCCESS));
595             }
596             return;
597         }
598         if (time(nullptr) > startTime + DUMP_TIME_OUT) {
599             LOG_GC(ERROR) << "DumpHeapSnapshot kill thread, wait " << DUMP_TIME_OUT << " s";
600             kill(pid, SIGTERM);
601             if (callback) {
602                 callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::WAIT_FORK_PROCESS_TIMEOUT));
603             }
604             return;
605         }
606         usleep(DEFAULT_SLEEP_TIME);
607     }
608 }
609 
// Applies `cb` to every object in each shared-heap space: old, compress,
// non-movable, huge-object, app-spawn and read-only.
template<typename Callback>
void IterateSharedHeap(Callback &cb)
{
    auto heap = SharedHeap::GetInstance();
    heap->GetOldSpace()->IterateOverObjects(cb);
    heap->GetCompressSpace()->IterateOverObjects(cb);
    heap->GetNonMovableSpace()->IterateOverObjects(cb);
    heap->GetHugeObjectSpace()->IterateOverObjects(cb);
    heap->GetAppSpawnSpace()->IterateOverObjects(cb);
    heap->GetReadOnlySpace()->IterateOverObjects(cb);
}
621 
GetHeapCntAndSize(const EcmaVM * vm)622 std::pair<uint64_t, uint64_t> GetHeapCntAndSize(const EcmaVM *vm)
623 {
624     uint64_t cnt = 0;
625     uint64_t objectSize = 0;
626     auto cb = [&objectSize, &cnt]([[maybe_unused]] TaggedObject *obj) {
627         objectSize += obj->GetClass()->SizeFromJSHClass(obj);
628         ++cnt;
629     };
630     vm->GetHeap()->IterateOverObjects(cb, false);
631     return std::make_pair(cnt, objectSize);
632 }
633 
GetSharedCntAndSize()634 std::pair<uint64_t, uint64_t> GetSharedCntAndSize()
635 {
636     uint64_t cnt = 0;
637     uint64_t size = 0;
638     auto cb = [&cnt, &size](TaggedObject *obj) {
639         cnt++;
640         size += obj->GetClass()->SizeFromJSHClass(obj);
641     };
642     IterateSharedHeap(cb);
643     return std::make_pair(cnt, size);
644 }
645 
// Collects the current root object set: heap roots of the VM's JS thread,
// shared-module references and runtime-cached string roots. Slots whose
// value is not a heap object are skipped; heap-object values are inserted
// as-is (no weak-tag handling here).
static CUnorderedSet<TaggedObject*> GetRootObjects(const EcmaVM *vm)
{
    CUnorderedSet<TaggedObject*> result {};
    HeapRootVisitor visitor;

    // Local visitor that simply accumulates every visited heap object.
    class EdgeBuilderRootVisitor final : public RootVisitor {
    public:
        explicit EdgeBuilderRootVisitor(CUnorderedSet<TaggedObject*> &result) : result_(result) {}
        ~EdgeBuilderRootVisitor() = default;

        void VisitRoot([[maybe_unused]] Root type, ObjectSlot slot) override
        {
            JSTaggedValue value((slot).GetTaggedType());
            if (!value.IsHeapObject()) {
                return;
            }
            TaggedObject *root = value.GetTaggedObject();
            result_.insert(root);
        }

        void VisitRangeRoot([[maybe_unused]] Root type, ObjectSlot start, ObjectSlot end) override
        {
            for (ObjectSlot slot = start; slot < end; slot++) {
                JSTaggedValue value((slot).GetTaggedType());
                if (!value.IsHeapObject()) {
                    continue;
                }
                TaggedObject *root = value.GetTaggedObject();
                result_.insert(root);
            }
        }

        // Base/derived pairs carry no additional roots for this collection.
        void VisitBaseAndDerivedRoot([[maybe_unused]] Root type, [[maybe_unused]] ObjectSlot base,
            [[maybe_unused]] ObjectSlot derived, [[maybe_unused]] uintptr_t baseOldObject) override {}
    private:
        CUnorderedSet<TaggedObject*> &result_;
    };
    EdgeBuilderRootVisitor edgeBuilderRootVisitor(result);

    visitor.VisitHeapRoots(vm->GetJSThread(), edgeBuilderRootVisitor);
    SharedModuleManager::GetInstance()->Iterate(edgeBuilderRootVisitor);
    Runtime::GetInstance()->IterateCachedStringRoot(edgeBuilderRootVisitor);
    return result;
}
690 
// Range visitor used by GetNotFoundObj: for every reference slot of a
// visited object, records targets that are NOT members of the supplied
// all-heap-objects set. Both sets are borrowed and must outlive the visitor.
class GetNotFoundObjVisitor final : public EcmaObjectRangeVisitor<GetNotFoundObjVisitor> {
public:
    explicit GetNotFoundObjVisitor(CUnorderedSet<TaggedObject *> &notFoundObjSet,
                                   CUnorderedSet<TaggedObject*> &allHeapObjSet)
        : notFoundObjSet_(notFoundObjSet), allHeapObjSet_(allHeapObjSet) {}
    ~GetNotFoundObjVisitor() = default;

    // Scans the slots [start, end); raw-data and native-pointer areas hold
    // no tagged references and are skipped.
    void VisitObjectRangeImpl([[maybe_unused]] TaggedObject *root, ObjectSlot start, ObjectSlot end,
                              VisitObjectArea area)
    {
        if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
            return;
        }
        for (ObjectSlot slot = start; slot < end; slot++) {
            auto taggedPointerAddr = reinterpret_cast<uint64_t **>(slot.SlotAddress());
            JSTaggedValue value(reinterpret_cast<TaggedObject *>(*taggedPointerAddr));
            auto originalAddr = reinterpret_cast<uint64_t>(*taggedPointerAddr);
            if (!value.IsHeapObject() || originalAddr == 0) {
                continue; // slot does not reference a heap object
            }
            if (value.IsWeakForHeapObject()) {
                originalAddr -= 1; // clear the weak tag bit before the lookup
            }
            if (allHeapObjSet_.find(reinterpret_cast<TaggedObject *>(originalAddr)) != allHeapObjSet_.end()) {
                continue; // target is a known heap object
            }
            // Record the raw (still possibly tagged) pointer once.
            auto obj = reinterpret_cast<TaggedObject *>(*taggedPointerAddr);
            if (notFoundObjSet_.find(obj) != notFoundObjSet_.end()) {
                continue;
            }
            notFoundObjSet_.insert(obj);
        }
    }
private:
    CUnorderedSet<TaggedObject *> &notFoundObjSet_; // referenced but unaccounted-for objects
    CUnorderedSet<TaggedObject*> &allHeapObjSet_;   // every object found by heap iteration
};
728 
GetNotFoundObj(const EcmaVM * vm)729 size_t GetNotFoundObj(const EcmaVM *vm)
730 {
731     size_t heapTotalSize = 0;
732     CUnorderedSet<TaggedObject*> allHeapObjSet {};
733     auto handleObj = [&allHeapObjSet, &heapTotalSize](TaggedObject *obj) {
734         allHeapObjSet.insert(obj);
735         uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
736         heapTotalSize += objSize;
737     };
738     vm->GetHeap()->IterateOverObjects(handleObj, false);
739     vm->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
740     IterateSharedHeap(handleObj);
741     LOG_ECMA(INFO) << "ark raw heap dump GetNotFound heap count:" << allHeapObjSet.size()
742                    << ", heap size=" << heapTotalSize;
743     CUnorderedSet<TaggedObject *> notFoundObjSet {};
744     GetNotFoundObjVisitor visitor(notFoundObjSet, allHeapObjSet);
745     for (auto obj : allHeapObjSet) {
746         ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(obj, obj->GetClass(), visitor);
747     }
748     LOG_ECMA(INFO) << "ark raw heap dump GetNotFound not found count:" << notFoundObjSet.size();
749     return notFoundObjSet.size();
750 }
751 
// Copies the memory of |objNum| objects (described by the AddrTableItem array
// at |objTable|) into chunk-allocated group buffers, filling in each header's
// objSize/offset on the way. Filled buffers are appended to |memBufMap| as
// {buffer, bytes used}. Returns the accumulated byte size of all objects
// visited so far (also on an early memcpy_s failure).
uint32_t HeapProfiler::CopyObjectMem2Buf(char *objTable, uint32_t objNum,
                                         CVector<std::pair<char *, uint32_t>> &memBufMap)
{
    char *currMemBuf = nullptr;
    uint32_t currSize = 0;  // bytes used in the current group buffer
    uint32_t totalSize = 0; // running total of all object sizes
    // Object contents begin right after the header table within the section.
    uint32_t curOffset = objNum * sizeof(AddrTableItem);
    auto objHeaders = reinterpret_cast<AddrTableItem *>(objTable);
    for (uint32_t j = 0; j < objNum; ++j) {
        auto obj = reinterpret_cast<TaggedObject *>(objHeaders[j].addr);
        JSTaggedValue value(obj);
        uint64_t objSize = obj->GetClass()->SizeFromJSHClass(obj);
        totalSize += objSize;
        // Start a fresh group buffer when none exists or the current one is
        // full; an object larger than PER_GROUP_MEM_SIZE gets a buffer of
        // exactly its own size.
        if (currSize + objSize > PER_GROUP_MEM_SIZE || currMemBuf == nullptr) {
            if (currMemBuf != nullptr) {
                memBufMap.push_back({currMemBuf, currSize});
            }
            currSize = 0;
            currMemBuf = chunk_.NewArray<char>(objSize > PER_GROUP_MEM_SIZE? objSize : PER_GROUP_MEM_SIZE);
        }
        objHeaders[j].objSize = objSize;
        objHeaders[j].offset = curOffset;
        int32_t ret;
        if (value.IsString()) {
            // Strings: copy only the first word (presumably the hclass
            // pointer) and zero-fill the remainder — TODO(review) confirm the
            // character data is intentionally elided here.
            CVector<uint64_t> strTmp(objSize / sizeof(uint64_t), 0);
            strTmp[0] = *reinterpret_cast<uint64_t *>(objHeaders[j].addr);
            ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(strTmp.data()), objSize);
        } else {
            ret = memcpy_s(currMemBuf + currSize, objSize, reinterpret_cast<void *>(objHeaders[j].addr), objSize);
        }
        if (ret != 0) {
            // NOTE(review): on failure the partially filled currMemBuf is
            // neither recorded in memBufMap nor returned to chunk_.
            LOG_ECMA(ERROR) << "ark raw heap dump CopyObjectMem memcpy_s failed, currSize="
                            << currSize << ",objSize=" << objSize << ",addr=" << objHeaders[j].addr;
            return totalSize;
        }
        curOffset += objSize;
        currSize += objSize;
    }
    // Record the final partially filled buffer, or free it if unused.
    if (currSize > 0) {
        memBufMap.push_back({currMemBuf, currSize});
    } else if (currMemBuf != nullptr) {
        chunk_.Delete<char>(currMemBuf);
    }
    return totalSize;
}
797 
// Builds groups of AddrTableItem headers for every local- and shared-heap
// object. Each group buffer holds up to HEAD_NUM_PER_GROUP entries and is
// registered in |headerMap| with its entry count. String ids produced by the
// snapshot are collected into |strIdMap| (string id -> object addresses).
// Returns the total number of objects recorded.
uint32_t HeapProfiler::GenObjTable(CUnorderedMap<char *, uint32_t> &headerMap, HeapSnapshot *snapshot,
                                   CUnorderedMap<uint64_t, CVector<uint64_t>> &strIdMap)
{
    char *currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
    uint32_t index = 0;  // entry index within the current group buffer
    uint32_t objNum = 0; // entries in all full (already registered) groups
    auto table = reinterpret_cast<AddrTableItem *>(currBuf);
    auto handleObj = [&index, &table, &objNum, &headerMap, &currBuf, &snapshot, &strIdMap, this](TaggedObject *obj) {
        JSTaggedValue value(obj);
        auto taggedType = value.GetRawData();
        // Reuse the existing node id for this address, or record a fresh one.
        auto [exist, id] = entryIdMap_->FindId(taggedType);
        if (!exist) {
            entryIdMap_->InsertId(taggedType, id);
        }
        table[index].addr = reinterpret_cast<uint64_t>(obj);
        table[index].id = id;
        auto strId = snapshot->GenerateStringId(obj);
        if (strId != 1) { // 1 : invalid str id
            if (strIdMap.find(strId) == strIdMap.end()) {
                strIdMap.emplace(strId, CVector<uint64_t>());
            }
            strIdMap[strId].push_back(table[index].addr);
        }
        index++;
        if (index == HEAD_NUM_PER_GROUP) {
            // Current group is full: register it and start a new buffer.
            headerMap.emplace(currBuf, index);
            objNum += HEAD_NUM_PER_GROUP;
            index = 0;
            currBuf = chunk_.NewArray<char>(PER_GROUP_MEM_SIZE);
            table = reinterpret_cast<AddrTableItem *>(currBuf);
        }
    };
    vm_->GetHeap()->IterateOverObjects(handleObj, false);
    vm_->GetHeap()->GetCompressSpace()->IterateOverObjects(handleObj);
    IterateSharedHeap(handleObj);
    objNum += index;
    if (index != 0) {
        // Register the final, partially filled group.
        headerMap.emplace(currBuf, index);
    } else {
        chunk_.Delete<char>(currBuf);
    }
    return objNum;
}
841 
842 // 4 byte: root_num
843 // 4 byte: unit size = sizeof(addr), 8 byte here
844 // {8 byte: root obj addr} * root_num
GenRootTable(Stream * stream)845 uint32_t HeapProfiler::GenRootTable(Stream *stream)
846 {
847     auto roots = GetRootObjects(vm_);
848     uint32_t rootSecHeadSize = 8; // 8 : root num 、 unit size
849     auto rootSecSize = roots.size() * (sizeof(TaggedObject *)) + rootSecHeadSize;
850     auto memBuf = chunk_.NewArray<char>(rootSecSize);
851     uint32_t *rootHeader = reinterpret_cast<uint32_t *>(memBuf);
852     uint64_t *rootBuf = reinterpret_cast<uint64_t *>(memBuf + rootSecHeadSize); // 8 : root addr start offset
853     rootHeader[0] = roots.size(); // 0: root num
854     rootHeader[1] = sizeof(TaggedObject *); // 1: unit size
855     auto currInd = 0;
856     for (auto root : roots) {
857         rootBuf[currInd++] = reinterpret_cast<uint64_t>(root);
858     }
859     LOG_ECMA(INFO) << "ark raw heap dump GenRootTable root cnt="<<roots.size();
860     stream->WriteBinBlock(memBuf, rootSecSize);
861     chunk_.Delete<char>(memBuf);
862     return rootSecSize;
863 }
864 
865 
866 // 4 byte: obj_num
867 // 4 byte: unit size = sizeof(AddrTableItem)
868 // {AddrTableItem} * obj_num
869 // {obj contents} * obj_num
WriteToBinFile(Stream * stream,char * objTab,uint32_t objNum,CVector<std::pair<char *,uint32_t>> & memBuf)870 uint32_t HeapProfiler::WriteToBinFile(Stream *stream, char *objTab, uint32_t objNum,
871                                       CVector<std::pair<char *, uint32_t>> &memBuf)
872 {
873     uint32_t secHeader[] = {objNum, sizeof(AddrTableItem)};
874     uint32_t secTotalSize = sizeof(secHeader);
875     stream->WriteBinBlock(reinterpret_cast<char *>(secHeader), secTotalSize);
876     uint32_t headerSize = objNum * sizeof(AddrTableItem);
877     secTotalSize += headerSize;
878     stream->WriteBinBlock(objTab, headerSize); // write obj header
879     chunk_.Delete<char>(objTab);
880     for (auto memItem : memBuf) {
881         stream->WriteBinBlock(memItem.first, memItem.second);
882         secTotalSize += memItem.second;
883         chunk_.Delete<char>(memItem.first);
884     }
885     return secTotalSize;
886 }
887 
DumpRawHeap(Stream * stream,uint32_t & fileOffset,CVector<uint32_t> & secIndexVec)888 bool HeapProfiler::DumpRawHeap(Stream *stream, uint32_t &fileOffset, CVector<uint32_t> &secIndexVec)
889 {
890     CUnorderedMap<char *, uint32_t> objTabMap; // buf map table num
891     CUnorderedMap<uint64_t, CVector<uint64_t>> strIdMapObjVec; // string id map to objs vector
892     DumpSnapShotOption op;
893     auto snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), op, false, entryIdMap_);
894     uint32_t objTotalNum = GenObjTable(objTabMap, snapshot, strIdMapObjVec);
895     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap totalObjNumber=" << objTotalNum;
896     CVector<CVector<std::pair<char *, uint32_t>>> allMemBuf(objTabMap.size(), CVector<std::pair<char *, uint32_t>>());
897     CVector<std::thread> threadsVec;
898     CVector<char *> objTabVec(objTabMap.size());
899     uint32_t index = 0;
900     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap start to copy, thread num=" << objTabMap.size();
901     for (auto tableItem : objTabMap) {
902         auto tdCb = [this, &tableItem, &allMemBuf, &index] () {
903             CopyObjectMem2Buf(tableItem.first, tableItem.second, allMemBuf[index]);
904         };
905         threadsVec.emplace_back(tdCb);
906         objTabVec[index] = tableItem.first;
907         threadsVec[index].join();
908         ++index;
909     }
910     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write string, num=" << strIdMapObjVec.size();
911     secIndexVec.push_back(fileOffset); // string table section offset
912     auto size = HeapSnapshotJSONSerializer::DumpStringTable(GetEcmaStringTable(), stream, strIdMapObjVec);
913     secIndexVec.push_back(size); // string table section size
914     GetChunk()->Delete(snapshot);
915     fileOffset += size;
916     strIdMapObjVec.clear();
917     uint32_t finCnt = 0;
918     LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write obj, offset=" << fileOffset;
919     while (finCnt < threadsVec.size()) {
920         for (index = 0; index < threadsVec.size(); ++index) {
921             if (threadsVec[index].joinable()) { // thread not finished
922                 continue;
923             }
924             ++finCnt;
925             secIndexVec.push_back(fileOffset); // current section offset
926             auto objNum = objTabMap[objTabVec[index]];
927             auto currSecSize = WriteToBinFile(stream, objTabVec[index], objNum, allMemBuf[index]);
928             LOG_ECMA(INFO) << "ark raw heap dump DumpRawHeap write offset=" << fileOffset << ", size=" << currSecSize;
929             secIndexVec.push_back(currSecSize); // current section size
930             fileOffset += currSecSize;
931         }
932     }
933     return true;
934 }
935 
936 //  * 8 byte: version id
937 //  * root table section
938 //  * string table section
939 //  * {heap section / share heap section} * thread_num
940 //  * 4 byte: root table section offset
941 //  * 4 byte: root table section size
942 //  * 4 byte: string table section offset
943 //  * 4 byte: string table section size
944 //  * {
945 //  * 4 byte: obj section offset
946 //  * 4 byte: obj section size
947 //  * } * thread_num
948 //  * 4 byte: section_offset_num size, 4 byte here
949 //  * 4 byte: section_num
BinaryDump(Stream * stream,const DumpSnapShotOption & dumpOption)950 bool HeapProfiler::BinaryDump(Stream *stream, const DumpSnapShotOption &dumpOption)
951 {
952     [[maybe_unused]] EcmaHandleScope ecmaHandleScope(vm_->GetJSThread());
953     DumpSnapShotOption option;
954     auto stringTable = chunk_.New<StringHashMap>(vm_);
955     auto snapshot = chunk_.New<HeapSnapshot>(vm_, stringTable, option, false, entryIdMap_);
956     RawHeapDump rawHeapDump(vm_, stream, snapshot, entryIdMap_);
957     rawHeapDump.BinaryDump(dumpOption);
958     chunk_.Delete<StringHashMap>(stringTable);
959     chunk_.Delete<HeapSnapshot>(snapshot);
960     return true;
961 }
962 
FillIdMap()963 void HeapProfiler::FillIdMap()
964 {
965     EntryIdMap* newEntryIdMap = GetChunk()->New<EntryIdMap>();
966     // Iterate SharedHeap Object
967     SharedHeap* sHeap = SharedHeap::GetInstance();
968     if (sHeap != nullptr) {
969         sHeap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
970             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
971             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
972             newEntryIdMap->InsertId(addr, sequenceId);
973         });
974         sHeap->GetReadOnlySpace()->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
975             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
976             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
977             newEntryIdMap->InsertId(addr, sequenceId);
978         });
979     }
980 
981     // Iterate LocalHeap Object
982     auto heap = vm_->GetHeap();
983     if (heap != nullptr) {
984         heap->IterateOverObjects([newEntryIdMap, this](TaggedObject *obj) {
985             JSTaggedType addr = ((JSTaggedValue)obj).GetRawData();
986             auto [idExist, sequenceId] = entryIdMap_->FindId(addr);
987             newEntryIdMap->InsertId(addr, sequenceId);
988         });
989     }
990 
991     // copy entryIdMap
992     CUnorderedMap<JSTaggedType, NodeId>* idMap = entryIdMap_->GetIdMap();
993     CUnorderedMap<JSTaggedType, NodeId>* newIdMap = newEntryIdMap->GetIdMap();
994     *idMap = *newIdMap;
995 
996     GetChunk()->Delete(newEntryIdMap);
997 }
998 
// Dumps a heap snapshot to |stream|. Depending on |dumpOption| this either
// dumps synchronously (OOM / IDE paths) or forks a child process that dumps
// the frozen copy-on-write heap image while the parent resumes; |callback|
// is invoked on fork failure (and passed to the child reaper on success).
// NOTE(review): on the fork path the parent returns |res| == false because
// only the child assigns it — confirm callers treat that as "dump started".
bool HeapProfiler::DumpHeapSnapshot(Stream *stream, const DumpSnapShotOption &dumpOption, Progress *progress,
                                    std::function<void(uint8_t)> callback)
{
    bool res = false;
    base::BlockHookScope blockScope;
    ThreadManagedScope managedScope(vm_->GetJSThread());
    pid_t pid = -1;
    {
        // Optional full GC so the snapshot only contains live objects.
        if (dumpOption.isFullGC) {
            [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
            ForceSharedGC();
            ASSERT(heapClean);
        }
        SuspendAllScope suspendScope(vm_->GetAssociatedJSThread()); // suspend All.
        const_cast<Heap*>(vm_->GetHeap())->Prepare();
        SharedHeap::GetInstance()->Prepare(true);
        Runtime::GetInstance()->GCIterateThreadList([&](JSThread *thread) {
            ASSERT(!thread->IsInRunningState());
            const_cast<Heap*>(thread->GetEcmaVM()->GetHeap())->FillBumpPointerForTlab();
        });
        // OOM and ThresholdReachedDump.
        if (dumpOption.isDumpOOM) {
            res = BinaryDump(stream, dumpOption);
            stream->EndOfStream();
            return res;
        }
        // ide.
        if (dumpOption.isSync) {
            return DoDump(stream, progress, dumpOption);
        }
        // hidumper do fork and fillmap.
        if (dumpOption.isBeforeFill) {
            FillIdMap();
        }
        // fork
        if ((pid = fork()) < 0) {
            LOG_ECMA(ERROR) << "DumpHeapSnapshot fork failed!";
            if (callback) {
                callback(static_cast<uint8_t>(DumpHeapSnapshotStatus::FORK_FAILED));
            }
            return false;
        }
        if (pid == 0) {
            // Child: dump the inherited heap image, then exit without running
            // parent-side cleanup (_exit, not exit).
            vm_->GetAssociatedJSThread()->EnableCrossThreadExecution();
            prctl(PR_SET_NAME, reinterpret_cast<unsigned long>("dump_process"), 0, 0, 0);
            if (dumpOption.dumpFormat == DumpFormat::BINARY) {
                res = BinaryDump(stream, dumpOption);
            } else {
                res = DoDump(stream, progress, dumpOption);
            }
            _exit(0);
        }
    }
    if (pid != 0) {
        // Parent: reap the child asynchronously and close the stream.
        std::thread thread(&WaitProcess, pid, callback);
        thread.detach();
        stream->EndOfStream();
    }
    isProfiling_ = true;
    return res;
}
1060 
StartHeapTracking(double timeInterval,bool isVmMode,Stream * stream,bool traceAllocation,bool newThread)1061 bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
1062                                      bool traceAllocation, bool newThread)
1063 {
1064     vm_->CollectGarbage(TriggerGCType::OLD_GC);
1065     ForceSharedGC();
1066     SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
1067     DumpSnapShotOption dumpOption;
1068     dumpOption.isVmMode = isVmMode;
1069     dumpOption.isPrivate = false;
1070     dumpOption.captureNumericValue = false;
1071     HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, dumpOption, traceAllocation);
1072     if (snapshot == nullptr) {
1073         return false;
1074     }
1075     isProfiling_ = true;
1076     UpdateHeapObjects(snapshot);
1077     heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
1078     const_cast<EcmaVM *>(vm_)->StartHeapTracking();
1079     if (newThread) {
1080         heapTracker_->StartTracing();
1081     }
1082 
1083     return true;
1084 }
1085 
UpdateHeapTracking(Stream * stream)1086 bool HeapProfiler::UpdateHeapTracking(Stream *stream)
1087 {
1088     if (heapTracker_ == nullptr) {
1089         return false;
1090     }
1091     HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
1092     if (snapshot == nullptr) {
1093         return false;
1094     }
1095 
1096     {
1097         vm_->CollectGarbage(TriggerGCType::OLD_GC);
1098         ForceSharedGC();
1099         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
1100         UpdateHeapObjects(snapshot);
1101         snapshot->RecordSampleTime();
1102     }
1103 
1104     if (stream != nullptr) {
1105         snapshot->PushHeapStat(stream);
1106     }
1107 
1108     return true;
1109 }
1110 
StopHeapTracking(Stream * stream,Progress * progress,bool newThread)1111 bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
1112 {
1113     if (heapTracker_ == nullptr) {
1114         return false;
1115     }
1116     int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
1117 
1118     const_cast<EcmaVM *>(vm_)->StopHeapTracking();
1119     if (newThread) {
1120         heapTracker_->StopTracing();
1121     }
1122 
1123     HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
1124     if (snapshot == nullptr) {
1125         return false;
1126     }
1127 
1128     if (progress != nullptr) {
1129         progress->ReportProgress(0, heapCount);
1130     }
1131     {
1132         ForceSharedGC();
1133         SuspendAllScope suspendScope(vm_->GetAssociatedJSThread());
1134         SharedHeap::GetInstance()->GetSweeper()->WaitAllTaskFinished();
1135         snapshot->FinishSnapshot();
1136     }
1137 
1138     isProfiling_ = false;
1139     if (progress != nullptr) {
1140         progress->ReportProgress(heapCount, heapCount);
1141     }
1142     return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
1143 }
1144 
GenDumpFileName(DumpFormat dumpFormat)1145 std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
1146 {
1147     CString filename("hprof_");
1148     switch (dumpFormat) {
1149         case DumpFormat::JSON:
1150             filename.append(GetTimeStamp());
1151             break;
1152         case DumpFormat::BINARY:
1153             filename.append("unimplemented");
1154             break;
1155         case DumpFormat::OTHER:
1156             filename.append("unimplemented");
1157             break;
1158         default:
1159             filename.append("unimplemented");
1160             break;
1161     }
1162     filename.append(".heapsnapshot");
1163     return ConvertToStdString(filename);
1164 }
1165 
GetTimeStamp()1166 CString HeapProfiler::GetTimeStamp()
1167 {
1168     std::time_t timeSource = std::time(nullptr);
1169     struct tm tm {
1170     };
1171     struct tm *timeData = localtime_r(&timeSource, &tm);
1172     if (timeData == nullptr) {
1173         LOG_FULL(FATAL) << "localtime_r failed";
1174         UNREACHABLE();
1175     }
1176     CString stamp;
1177     const int TIME_START = 1900;
1178     stamp.append(ToCString(timeData->tm_year + TIME_START))
1179         .append("-")
1180         .append(ToCString(timeData->tm_mon + 1))
1181         .append("-")
1182         .append(ToCString(timeData->tm_mday))
1183         .append("_")
1184         .append(ToCString(timeData->tm_hour))
1185         .append("-")
1186         .append(ToCString(timeData->tm_min))
1187         .append("-")
1188         .append(ToCString(timeData->tm_sec));
1189     return stamp;
1190 }
1191 
ForceFullGC(const EcmaVM * vm)1192 bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
1193 {
1194     if (vm->IsInitialized()) {
1195         const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
1196         return true;
1197     }
1198     return false;
1199 }
1200 
ForceSharedGC()1201 void HeapProfiler::ForceSharedGC()
1202 {
1203     SharedHeap *sHeap = SharedHeap::GetInstance();
1204     sHeap->CollectGarbage<TriggerGCType::SHARED_GC, GCReason::OTHER>(vm_->GetAssociatedJSThread());
1205     sHeap->GetSweeper()->WaitAllTaskFinished();
1206 }
1207 
MakeHeapSnapshot(SampleType sampleType,const DumpSnapShotOption & dumpOption,bool traceAllocation)1208 HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, const DumpSnapShotOption &dumpOption,
1209                                              bool traceAllocation)
1210 {
1211     LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
1212     if (dumpOption.isFullGC) {
1213         DISALLOW_GARBAGE_COLLECTION;
1214         const_cast<Heap *>(vm_->GetHeap())->Prepare();
1215     }
1216     switch (sampleType) {
1217         case SampleType::ONE_SHOT: {
1218             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
1219                                                            traceAllocation, entryIdMap_);
1220             if (snapshot == nullptr) {
1221                 LOG_FULL(FATAL) << "alloc snapshot failed";
1222                 UNREACHABLE();
1223             }
1224             snapshot->BuildUp(dumpOption.isSimplify);
1225             return snapshot;
1226         }
1227         case SampleType::REAL_TIME: {
1228             auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), dumpOption,
1229                                                            traceAllocation, entryIdMap_);
1230             if (snapshot == nullptr) {
1231                 LOG_FULL(FATAL) << "alloc snapshot failed";
1232                 UNREACHABLE();
1233             }
1234             AddSnapshot(snapshot);
1235             snapshot->PrepareSnapshot();
1236             return snapshot;
1237         }
1238         default:
1239             return nullptr;
1240     }
1241 }
1242 
AddSnapshot(HeapSnapshot * snapshot)1243 void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
1244 {
1245     if (hprofs_.size() >= MAX_NUM_HPROF) {
1246         ClearSnapshot();
1247     }
1248     ASSERT(snapshot != nullptr);
1249     hprofs_.emplace_back(snapshot);
1250 }
1251 
ClearSnapshot()1252 void HeapProfiler::ClearSnapshot()
1253 {
1254     for (auto *snapshot : hprofs_) {
1255         GetChunk()->Delete(snapshot);
1256     }
1257     hprofs_.clear();
1258 }
1259 
StartHeapSampling(uint64_t samplingInterval,int stackDepth)1260 bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
1261 {
1262     if (heapSampling_.get()) {
1263         LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
1264         return false;
1265     }
1266     heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
1267                                                    samplingInterval, stackDepth);
1268     return true;
1269 }
1270 
// Stops allocation sampling and destroys the sampler (no-op if not running).
void HeapProfiler::StopHeapSampling()
{
    heapSampling_.reset();
}
1275 
GetAllocationProfile()1276 const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
1277 {
1278     if (!heapSampling_.get()) {
1279         LOG_ECMA(ERROR) << "Heap sampling was not started, please start firstly.";
1280         return nullptr;
1281     }
1282     return heapSampling_->GetAllocationProfile();
1283 }
1284 
1285 #if defined(ENABLE_LOCAL_HANDLE_LEAK_DETECT)
// Returns whether local-handle leak detection is currently enabled.
bool HeapProfiler::IsStartLocalHandleLeakDetect() const
{
    return startLocalHandleLeakDetect_;
}
1290 
// Toggles the local-handle leak detection flag.
void HeapProfiler::SwitchStartLocalHandleLeakDetect()
{
    startLocalHandleLeakDetect_ = !startLocalHandleLeakDetect_;
}
1295 
// Increments the live local-scope counter (used by leak heuristics).
void HeapProfiler::IncreaseScopeCount()
{
    ++scopeCount_;
}
1300 
// Decrements the live local-scope counter.
void HeapProfiler::DecreaseScopeCount()
{
    --scopeCount_;
}
1305 
// Returns the number of currently active local scopes.
uint32_t HeapProfiler::GetScopeCount() const
{
    return scopeCount_;
}
1310 
// Wraps the given scopes and pushes them onto the active-scope stack.
void HeapProfiler::PushToActiveScopeStack(LocalScope *localScope, EcmaHandleScope *ecmaHandleScope)
{
    activeScopeStack_.emplace(std::make_shared<ScopeWrapper>(localScope, ecmaHandleScope));
}
1315 
// Removes the most recently pushed scope, if any.
void HeapProfiler::PopFromActiveScopeStack()
{
    if (!activeScopeStack_.empty()) {
        activeScopeStack_.pop();
    }
}
1322 
GetLastActiveScope() const1323 std::shared_ptr<ScopeWrapper> HeapProfiler::GetLastActiveScope() const
1324 {
1325     if (!activeScopeStack_.empty()) {
1326         return activeScopeStack_.top();
1327     }
1328     return nullptr;
1329 }
1330 
// Drops all recorded handle backtraces.
void HeapProfiler::ClearHandleBackTrace()
{
    handleBackTrace_.clear();
}
1335 
GetBackTraceOfHandle(const uintptr_t handle) const1336 std::string_view HeapProfiler::GetBackTraceOfHandle(const uintptr_t handle) const
1337 {
1338     const auto it = handleBackTrace_.find(handle);
1339     if (it != handleBackTrace_.end()) {
1340         return std::string_view(it->second);
1341     }
1342     return "";
1343 }
1344 
// Records (or overwrites) the captured backtrace for |handle|. Returns true
// when a new entry was inserted, false when an existing one was replaced.
bool HeapProfiler::InsertHandleBackTrace(uintptr_t handle, const std::string &backTrace)
{
    auto [iter, inserted] = handleBackTrace_.insert_or_assign(handle, backTrace);
    return inserted;
}
1350 
// Appends a newline, flushes |buffer|'s contents to the leak-stack-trace fd,
// then clears the buffer for reuse. No-op when the fd has not been set.
void HeapProfiler::WriteToLeakStackTraceFd(std::ostringstream &buffer) const
{
    if (leakStackTraceFd_ < 0) {
        return;
    }
    buffer << std::endl;
    DPrintf(reinterpret_cast<fd_t>(leakStackTraceFd_), buffer.str());
    buffer.str("");
}
1360 
// Stores the file descriptor used for leak stack-trace output.
void HeapProfiler::SetLeakStackTraceFd(const int32_t fd)
{
    leakStackTraceFd_ = fd;
}
1365 
// Returns the leak stack-trace fd (-1 when unset/closed).
int32_t HeapProfiler::GetLeakStackTraceFd() const
{
    return leakStackTraceFd_;
}
1370 
CloseLeakStackTraceFd()1371 void HeapProfiler::CloseLeakStackTraceFd()
1372 {
1373     if (leakStackTraceFd_ != -1) {
1374         FSync(reinterpret_cast<fd_t>(leakStackTraceFd_));
1375         Close(reinterpret_cast<fd_t>(leakStackTraceFd_));
1376         leakStackTraceFd_ = -1;
1377     }
1378 }
1379 
StorePotentiallyLeakHandles(const uintptr_t handle)1380 void HeapProfiler::StorePotentiallyLeakHandles(const uintptr_t handle)
1381 {
1382     bool isDetectedByScopeCount { GetScopeCount() <= 1 };
1383     bool isDetectedByScopeTime { false };
1384     if (auto lastScope = GetLastActiveScope()) {
1385         auto timeSinceLastScopeCreate = lastScope->clockScope_.TotalSpentTime();
1386         isDetectedByScopeTime = timeSinceLastScopeCreate >= LOCAL_HANDLE_LEAK_TIME_MS;
1387     }
1388     if (isDetectedByScopeCount || isDetectedByScopeTime) {
1389         std::ostringstream stack;
1390         Backtrace(stack, true);
1391         InsertHandleBackTrace(handle, stack.str());
1392     }
1393 }
1394 #endif  // ENABLE_LOCAL_HANDLE_LEAK_DETECT
1395 
// Orchestrates the whole raw-heap binary dump. Sections are written
// sequentially — version, root table, string table, object table, object
// memory, then the section index — and their offsets/sizes are recorded in
// secIndexVec_ along the way, so the order must not change.
void RawHeapDump::BinaryDump(const DumpSnapShotOption &dumpOption)
{
    ClearVisitMark();
    DumpVersion();
    DumpRootTable();
    DumpStringTable();
    DumpObjectTable(dumpOption);
    DumpObjectMemory();
    DumpSectionIndex();
    WriteBinBlock();  // flush any remaining buffered data to the stream
    EndVisitMark();   // presumably resets the traversal marks — confirm
}
1408 
VisitObjectRangeImpl(TaggedObject * root,ObjectSlot start,ObjectSlot end,VisitObjectArea area)1409 void RawHeapDump::VisitObjectRangeImpl(TaggedObject *root, ObjectSlot start, ObjectSlot end, VisitObjectArea area)
1410 {
1411     if (area == VisitObjectArea::RAW_DATA || area == VisitObjectArea::NATIVE_POINTER) {
1412         return;
1413     }
1414     auto hclass = reinterpret_cast<TaggedObject *>(root->GetClass());
1415     if (MarkObject(hclass)) {
1416         bfsQueue_.emplace(hclass);
1417     }
1418     for (ObjectSlot slot = start; slot < end; slot++) {
1419         auto value = slot.GetTaggedValue();
1420         if (!value.IsHeapObject()) {
1421             continue;
1422         }
1423         auto obj = value.GetWeakReferentUnChecked();
1424         if (MarkObject(obj)) {
1425             bfsQueue_.emplace(obj);
1426         }
1427     }
1428 }
1429 
// Writes the fixed-size version identifier at the start of the dump file.
void RawHeapDump::DumpVersion()
{
    WriteChunk(const_cast<char *>(versionID), VERSION_ID_SIZE);
    LOG_ECMA(INFO) << "rawheap dump, version " << std::string(versionID);
}
1435 
// Visits all GC roots (thread roots, shared modules, cached strings) — the
// visitor callbacks populate roots_ — then writes the root-table section:
// a {count, unit size} header followed by one 8-byte address per root.
// Section offset and size are appended to secIndexVec_.
void RawHeapDump::DumpRootTable()
{
    HeapRootVisitor rootVisitor;
    rootVisitor.VisitHeapRoots(vm_->GetJSThread(), *this);
    SharedModuleManager::GetInstance()->Iterate(*this);
    Runtime::GetInstance()->IterateCachedStringRoot(*this);

    secIndexVec_.push_back(fileOffset_);
    uint32_t rootObjCnt = roots_.size();
    uint32_t rootTableHeader[2] = {rootObjCnt, sizeof(TaggedObject *)};
    WriteChunk(reinterpret_cast<char *>(rootTableHeader), sizeof(rootTableHeader));
    for (auto &root : roots_) {
        uint64_t addr = reinterpret_cast<uint64_t>(root);
        WriteU64(addr);
        // Presumably marks everything reachable from this root — confirm.
        ProcessMarkObjectsFromRoot(root);
    }
    secIndexVec_.push_back(sizeof(TaggedObject *) * rootObjCnt + sizeof(rootTableHeader));
    LOG_ECMA(INFO) << "rawheap dump, root count " << rootObjCnt;
}
1455 
// Writes the object-table section: a {count, sizeof(AddrTableItem)} header
// followed by one AddrTableItem per marked object. Each item's |offset| is
// the object's position inside the following object-memory section; string
// objects only contribute sizeof(JSHClass *) bytes there (see
// DumpObjectMemory), so they advance the offset by that amount.
void RawHeapDump::DumpObjectTable(const DumpSnapShotOption &dumpOption)
{
    secIndexVec_.push_back(fileOffset_);
    objCnt_ += readOnlyObjects_.size();
    uint32_t header[2] = {objCnt_, sizeof(AddrTableItem)};
    WriteChunk(reinterpret_cast<char *>(header), sizeof(header));

    uint32_t offset = header[0] * header[1];
    auto objTableDump = [&offset, &dumpOption, this](void *addr) {
        auto obj = reinterpret_cast<TaggedObject *>(addr);
        AddrTableItem table;
        table.addr = reinterpret_cast<uint64_t>(obj);
        // OOM dumps skip id-map bookkeeping and just consume a fresh id.
        table.id = dumpOption.isDumpOOM ?
            entryIdMap_->GetNextId() : entryIdMap_->FindOrInsertNodeId(reinterpret_cast<JSTaggedType>(addr));
        table.objSize = obj->GetClass()->SizeFromJSHClass(obj);
        table.offset = offset;
        WriteChunk(reinterpret_cast<char *>(&table), sizeof(AddrTableItem));
        if (obj->GetClass()->IsString()) {
            offset += sizeof(JSHClass *);
        } else {
            offset += table.objSize;
        }
    };
    IterateMarkedObjects(objTableDump);
    LOG_ECMA(INFO) << "rawheap dump, object total count " << objCnt_;
}
1482 
DumpObjectMemory()1483 void RawHeapDump::DumpObjectMemory()
1484 {
1485     uint32_t startOffset = static_cast<uint32_t>(fileOffset_);
1486     auto objMemDump = [this](void *addr) {
1487         auto obj = reinterpret_cast<TaggedObject *>(addr);
1488         size_t size = obj->GetClass()->SizeFromJSHClass(obj);
1489         if (obj->GetClass()->IsString()) {
1490             size = sizeof(JSHClass *);
1491         }
1492         WriteChunk(reinterpret_cast<char *>(obj), size);
1493     };
1494     IterateMarkedObjects(objMemDump);
1495     // 2: means headers
1496     secIndexVec_.push_back(fileOffset_ - startOffset + objCnt_ * sizeof(AddrTableItem) + sizeof(uint32_t) * 2);
1497     LOG_ECMA(INFO) << "rawheap dump, object memory " << fileOffset_ - startOffset;
1498 }
1499 
DumpStringTable()1500 void RawHeapDump::DumpStringTable()
1501 {
1502     for (auto obj : readOnlyObjects_) {
1503         UpdateStringTable(obj);
1504     }
1505     WriteBinBlock();
1506     secIndexVec_.push_back(fileOffset_);
1507     auto size = HeapSnapshotJSONSerializer::DumpStringTable(snapshot_->GetEcmaStringTable(), stream_, strIdMapObjVec_);
1508     fileOffset_ += size;
1509     secIndexVec_.push_back(size);
1510     auto capcity = snapshot_->GetEcmaStringTable()->GetCapcity();
1511     LOG_ECMA(INFO) << "rawheap dump, string table capcity " << capcity << ", size " << size;
1512 }
1513 
DumpSectionIndex()1514 void RawHeapDump::DumpSectionIndex()
1515 {
1516     secIndexVec_.push_back(secIndexVec_.size());
1517     secIndexVec_.push_back(sizeof(uint32_t));
1518     WriteChunk(reinterpret_cast<char *>(secIndexVec_.data()), secIndexVec_.size() * sizeof(uint32_t));
1519     LOG_ECMA(INFO) << "rawheap dump, section count " << secIndexVec_.size();
1520 }
1521 
WriteU64(uint64_t number)1522 void RawHeapDump::WriteU64(uint64_t number)
1523 {
1524     if (PER_GROUP_MEM_SIZE - bufIndex_ < sizeof(uint64_t)) {
1525         stream_->WriteBinBlock(buffer_, bufIndex_);
1526         bufIndex_ = 0;
1527     }
1528     *reinterpret_cast<uint64_t *>(buffer_ + bufIndex_) = number;
1529     bufIndex_ += sizeof(uint64_t);
1530     fileOffset_ += sizeof(uint64_t);
1531 }
1532 
WriteChunk(char * data,size_t size)1533 void RawHeapDump::WriteChunk(char *data, size_t size)
1534 {
1535     while (size > 0) {
1536         MaybeWriteBuffer();
1537         size_t remainderSize = PER_GROUP_MEM_SIZE - bufIndex_;
1538         size_t writeSize = size < remainderSize ? size : remainderSize;
1539         if (writeSize <= 0) {
1540             break;
1541         }
1542         if (memcpy_s(buffer_ + bufIndex_, remainderSize, data, writeSize) != 0) {
1543             LOG_ECMA(ERROR) << "rawheap dump, WriteChunk failed, write size " <<
1544                                writeSize << ", remainder size " << remainderSize;
1545             break;
1546         }
1547         data += writeSize;
1548         size -= writeSize;
1549         bufIndex_ += writeSize;
1550         fileOffset_ += writeSize;
1551     }
1552 }
1553 
MaybeWriteBuffer()1554 void RawHeapDump::MaybeWriteBuffer()
1555 {
1556     if (UNLIKELY(bufIndex_ == PER_GROUP_MEM_SIZE)) {
1557         WriteBinBlock();
1558     }
1559 }
1560 
WriteBinBlock()1561 void RawHeapDump::WriteBinBlock()
1562 {
1563     if (bufIndex_ <= 0) {
1564         return;
1565     }
1566     if (!stream_->WriteBinBlock(buffer_, bufIndex_)) {
1567         LOG_ECMA(ERROR) << "rawheap dump, WriteBinBlock failed, write size " << bufIndex_;
1568     }
1569     bufIndex_ = 0;
1570 }
1571 
1572 
UpdateStringTable(TaggedObject * object)1573 void RawHeapDump::UpdateStringTable(TaggedObject *object)
1574 {
1575     StringId strId = snapshot_->GenerateStringId(object);
1576     if (strId == 1) {  // 1 : invalid str id
1577         return;
1578     }
1579     auto vec = strIdMapObjVec_.find(strId);
1580     if (vec != strIdMapObjVec_.end()) {
1581         vec->second.push_back(reinterpret_cast<uint64_t>(object));
1582     } else {
1583         CVector<uint64_t> objVec;
1584         objVec.push_back(reinterpret_cast<uint64_t>(object));
1585         strIdMapObjVec_.emplace(strId, objVec);
1586     }
1587 }
1588 
HandleRootValue(JSTaggedValue value)1589 void RawHeapDump::HandleRootValue(JSTaggedValue value)
1590 {
1591     if (!value.IsHeapObject()) {
1592         return;
1593     }
1594     TaggedObject *root = value.GetWeakReferentUnChecked();
1595     roots_.insert(root);
1596 }
1597 
IterateMarkedObjects(const std::function<void (void *)> & visitor)1598 void RawHeapDump::IterateMarkedObjects(const std::function<void(void *)> &visitor)
1599 {
1600     auto cb = [&visitor](Region *region) {
1601         region->IterateAllMarkedBits(visitor);
1602     };
1603     vm_->GetHeap()->EnumerateRegions(cb);
1604     SharedHeap::GetInstance()->EnumerateOldSpaceRegions(cb);
1605 
1606     for (auto obj : readOnlyObjects_) {
1607         visitor(obj);
1608     }
1609 }
1610 
ProcessMarkObjectsFromRoot(TaggedObject * root)1611 void RawHeapDump::ProcessMarkObjectsFromRoot(TaggedObject *root)
1612 {
1613     if (!MarkObject(root)) {
1614         return;
1615     }
1616     bfsQueue_.emplace(root);
1617     while (!bfsQueue_.empty()) {
1618         auto object = bfsQueue_.front();
1619         bfsQueue_.pop();
1620         if (object->GetClass()->IsString()) {
1621             MarkObject(reinterpret_cast<TaggedObject*>(object->GetClass()));
1622             continue;
1623         }
1624         ObjectXRay::VisitObjectBody<VisitType::OLD_GC_VISIT>(object, object->GetClass(), *this);
1625     }
1626 }
1627 
MarkObject(TaggedObject * object)1628 bool RawHeapDump::MarkObject(TaggedObject *object)
1629 {
1630     Region *region = Region::ObjectAddressToRange(object);
1631     if (region->InReadOnlySpace() || region->InSharedReadOnlySpace()) {
1632         readOnlyObjects_.insert(object);
1633         return false;
1634     }
1635     if (!region->NonAtomicMark(object)) {
1636         return false;
1637     }
1638     UpdateStringTable(object);
1639     ++objCnt_;
1640     return true;
1641 }
1642 
ClearVisitMark()1643 void RawHeapDump::ClearVisitMark()
1644 {
1645     auto cb = [](Region *region) {
1646         region->ClearMarkGCBitset();
1647     };
1648     vm_->GetHeap()->EnumerateRegions(cb);
1649     SharedHeap::GetInstance()->EnumerateOldSpaceRegions(cb);
1650 }
1651 
EndVisitMark()1652 void RawHeapDump::EndVisitMark()
1653 {
1654     auto cb = [](Region *region) {
1655         if (region->InAppSpawnSpace() || region->InSharedAppSpawnSpace()) {
1656             return;
1657         }
1658         region->ClearMarkGCBitset();
1659     };
1660     vm_->GetHeap()->EnumerateRegions(cb);
1661     SharedHeap::GetInstance()->EnumerateOldSpaceRegions(cb);
1662 }
1663 }  // namespace panda::ecmascript
1664