/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ecmascript/dfx/hprof/heap_profiler.h"

#include "ecmascript/base/block_hook_scope.h"
#include "ecmascript/dfx/hprof/file_stream.h"
#include "ecmascript/dfx/hprof/heap_snapshot.h"
#include "ecmascript/ecma_vm.h"
#include "ecmascript/js_thread.h"
#include "ecmascript/jspandafile/js_pandafile_manager.h"
#include "ecmascript/mem/assert_scope.h"
#include "ecmascript/mem/concurrent_sweeper.h"
#include "ecmascript/mem/heap-inl.h"

#if defined(ENABLE_DUMP_IN_FAULTLOG)
#include "faultloggerd_client.h"
#endif

namespace panda::ecmascript {
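// Look up the node id recorded for an object address: returns {true, existing id} when the
// address is already mapped, or {false, next free id} when it is not.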
std::pair<bool, uint32_t> EntryIdMap::FindId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return std::make_pair(false, GetNextId()); // return nextId if the entry does not exist
    } else {
        return std::make_pair(true, it->second);
    }
}

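// Record an address -> id mapping. Returns true when a new entry is created; if the address is
// already mapped, its id is overwritten and false is returned.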
bool EntryIdMap::InsertId(JSTaggedType addr, uint32_t id)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        idMap_.emplace(addr, id);
        return true;
    }
    idMap_[addr] = id;
    return false;
}

bool EntryIdMap::EraseId(JSTaggedType addr)
{
    auto it = idMap_.find(addr);
    if (it == idMap_.end()) {
        return false;
    }
    idMap_.erase(it);
    return true;
}

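// Rebind an object's id after the GC moves it from oldAddr to forwardAddr.
// Returns false when oldAddr was never recorded.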
bool EntryIdMap::Move(JSTaggedType oldAddr, JSTaggedType forwardAddr)
{
    if (oldAddr == forwardAddr) {
        return true;
    }
    auto it = idMap_.find(oldAddr);
    if (it != idMap_.end()) {
        uint32_t id = it->second;
        idMap_.erase(it);
        idMap_[forwardAddr] = id;
        return true;
    }
    return false;
}

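// Rebuild the id map from the snapshot's nodes so that entries for objects no longer present in
// the snapshot are dropped.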
void EntryIdMap::UpdateEntryIdMap(HeapSnapshot *snapshot)
{
    auto nodes = snapshot->GetNodes();
    CUnorderedMap<JSTaggedType, uint32_t> newIdMap;
    for (auto node : *nodes) {
        auto addr = node->GetAddress();
        auto it = idMap_.find(addr);
        if (it != idMap_.end()) {
            newIdMap.emplace(addr, it->second);
        }
    }
    idMap_ = std::move(newIdMap);
}

HeapProfiler::HeapProfiler(const EcmaVM *vm) : vm_(vm), stringTable_(vm), chunk_(vm->GetNativeAreaAllocator())
{
    isProfiling_ = false;
    entryIdMap_ = GetChunk()->New<EntryIdMap>();
}

HeapProfiler::~HeapProfiler()
{
    JSPandaFileManager::GetInstance()->ClearNameMap();
    ClearSnapshot();
    GetChunk()->Delete(entryIdMap_);
}

void HeapProfiler::AllocationEvent(TaggedObject *address, size_t size)
{
    DISALLOW_GARBAGE_COLLECTION;
    if (isProfiling_) {
        // The id will be allocated later when the new node is added
        if (heapTracker_ != nullptr) {
            heapTracker_->AllocationEvent(address, size);
        }
    }
}

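// Update the id map for a moved object and, while profiling, notify the heap tracker.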
void HeapProfiler::MoveEvent(uintptr_t address, TaggedObject *forwardAddress, size_t size)
{
    LockHolder lock(mutex_);
    if (isProfiling_) {
        entryIdMap_->Move(static_cast<JSTaggedType>(address), reinterpret_cast<JSTaggedType>(forwardAddress));
        if (heapTracker_ != nullptr) {
            heapTracker_->MoveEvent(address, forwardAddress, size);
        }
    }
}

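// Trigger an old-space GC and wait for the concurrent sweeper before refreshing the snapshot's nodes,
// so only live objects are recorded.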
void HeapProfiler::UpdateHeapObjects(HeapSnapshot *snapshot)
{
    vm_->CollectGarbage(TriggerGCType::OLD_GC);
    vm_->GetHeap()->GetSweeper()->EnsureAllTaskFinished();
    snapshot->UpdateNodes();
}

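// Dump the snapshot through a file descriptor requested from faultloggerd, used for heap-leak
// fault logs.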
void HeapProfiler::DumpHeapSnapshot([[maybe_unused]] DumpFormat dumpFormat, [[maybe_unused]] bool isVmMode,
                                    [[maybe_unused]] bool isPrivate, [[maybe_unused]] bool captureNumericValue,
                                    [[maybe_unused]] bool isFullGC)
{
#if defined(ENABLE_DUMP_IN_FAULTLOG)
    // Write in faultlog for heap leak.
    int32_t fd = RequestFileDescriptor(static_cast<int32_t>(FaultLoggerType::JS_HEAP_SNAPSHOT));
    if (fd < 0) {
        LOG_ECMA(ERROR) << "OOM Dump Write FD failed, fd: " << fd;
        return;
    }
    FileDescriptorStream stream(fd);
    DumpHeapSnapshot(dumpFormat, &stream, nullptr, isVmMode, isPrivate, captureNumericValue, isFullGC);
#endif
}

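// Build a one-shot snapshot and serialize it as JSON to the given stream; if the stream is not
// usable, fall back to a generated dump file.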
bool HeapProfiler::DumpHeapSnapshot(DumpFormat dumpFormat, Stream *stream, Progress *progress,
                                    bool isVmMode, bool isPrivate, bool captureNumericValue, bool isFullGC)
{
    if (isFullGC) {
        [[maybe_unused]] bool heapClean = ForceFullGC(vm_);
        ASSERT(heapClean);
    }
    base::BlockHookScope blockScope;
    LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot start";
    int32_t heapCount = 0;
    if (isFullGC) {
        size_t heapSize = vm_->GetHeap()->GetLiveObjectSize();
        LOG_ECMA(INFO) << "HeapProfiler DumpSnapshot heap size " << heapSize;
        heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());
        if (progress != nullptr) {
            progress->ReportProgress(0, heapCount);
        }
    }
    HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::ONE_SHOT, isVmMode, isPrivate, captureNumericValue,
                                              false, isFullGC);
    ASSERT(snapshot != nullptr);
    entryIdMap_->UpdateEntryIdMap(snapshot);
    isProfiling_ = true;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    if (!stream->Good()) {
        FileStream newStream(GenDumpFileName(dumpFormat));
        auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, &newStream);
        GetChunk()->Delete(snapshot);
        return serializerResult;
    }
    auto serializerResult = HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
    GetChunk()->Delete(snapshot);
    return serializerResult;
}

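// Create a real-time snapshot and begin heap tracking; StartTracing() is only invoked when
// newThread is true.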
bool HeapProfiler::StartHeapTracking(double timeInterval, bool isVmMode, Stream *stream,
                                     bool traceAllocation, bool newThread)
{
    HeapSnapshot *snapshot = MakeHeapSnapshot(SampleType::REAL_TIME, isVmMode, false, false, traceAllocation);
    if (snapshot == nullptr) {
        return false;
    }
    isProfiling_ = true;
    UpdateHeapObjects(snapshot);
    heapTracker_ = std::make_unique<HeapTracker>(snapshot, timeInterval, stream);
    const_cast<EcmaVM *>(vm_)->StartHeapTracking();
    if (newThread) {
        heapTracker_->StartTracing();
    }

    return true;
}

bool HeapProfiler::UpdateHeapTracking(Stream *stream)
{
    if (heapTracker_ == nullptr) {
        return false;
    }
    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }
    snapshot->RecordSampleTime();
    UpdateHeapObjects(snapshot);

    if (stream != nullptr) {
        snapshot->PushHeapStat(stream);
    }

    return true;
}

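// Stop heap tracking, finish the snapshot, and serialize it to the stream.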
bool HeapProfiler::StopHeapTracking(Stream *stream, Progress *progress, bool newThread)
{
    base::BlockHookScope blockScope;
    if (heapTracker_ == nullptr) {
        return false;
    }
    int32_t heapCount = static_cast<int32_t>(vm_->GetHeap()->GetHeapObjectCount());

    const_cast<EcmaVM *>(vm_)->StopHeapTracking();
    if (newThread) {
        heapTracker_->StopTracing();
    }

    HeapSnapshot *snapshot = heapTracker_->GetHeapSnapshot();
    if (snapshot == nullptr) {
        return false;
    }

    if (progress != nullptr) {
        progress->ReportProgress(0, heapCount);
    }
    snapshot->FinishSnapshot();
    isProfiling_ = false;
    if (progress != nullptr) {
        progress->ReportProgress(heapCount, heapCount);
    }
    return HeapSnapshotJSONSerializer::Serialize(snapshot, stream);
}

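// Build a default dump file name; only the JSON format carries a timestamp, the other formats
// are not implemented yet.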
std::string HeapProfiler::GenDumpFileName(DumpFormat dumpFormat)
{
    CString filename("hprof_");
    switch (dumpFormat) {
        case DumpFormat::JSON:
            filename.append(GetTimeStamp());
            break;
        case DumpFormat::BINARY:
            filename.append("unimplemented");
            break;
        case DumpFormat::OTHER:
            filename.append("unimplemented");
            break;
        default:
            filename.append("unimplemented");
            break;
    }
    filename.append(".heapsnapshot");
    return ConvertToStdString(filename);
}

CString HeapProfiler::GetTimeStamp()
{
    std::time_t timeSource = std::time(nullptr);
    struct tm tm {};
    struct tm *timeData = localtime_r(&timeSource, &tm);
    if (timeData == nullptr) {
        LOG_FULL(FATAL) << "localtime_r failed";
        UNREACHABLE();
    }
    CString stamp;
    const int TIME_START = 1900;
    stamp.append(ToCString(timeData->tm_year + TIME_START))
        .append("-")
        .append(ToCString(timeData->tm_mon + 1))
        .append("-")
        .append(ToCString(timeData->tm_mday))
        .append("_")
        .append(ToCString(timeData->tm_hour))
        .append("-")
        .append(ToCString(timeData->tm_min))
        .append("-")
        .append(ToCString(timeData->tm_sec));
    return stamp;
}

bool HeapProfiler::ForceFullGC(const EcmaVM *vm)
{
    if (vm->IsInitialized()) {
        const_cast<Heap *>(vm->GetHeap())->CollectGarbage(TriggerGCType::FULL_GC);
        return true;
    }
    return false;
}

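// Allocate a snapshot from the chunk: a ONE_SHOT snapshot is built immediately, while a REAL_TIME
// snapshot is registered via AddSnapshot and prepared for later incremental updates.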
HeapSnapshot *HeapProfiler::MakeHeapSnapshot(SampleType sampleType, bool isVmMode, bool isPrivate,
                                             bool captureNumericValue, bool traceAllocation, bool isFullGC)
{
    base::BlockHookScope blockScope;
    LOG_ECMA(INFO) << "HeapProfiler::MakeHeapSnapshot";
    if (isFullGC) {
        DISALLOW_GARBAGE_COLLECTION;
        const_cast<Heap *>(vm_->GetHeap())->Prepare();
    }
    switch (sampleType) {
        case SampleType::ONE_SHOT: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), isVmMode, isPrivate,
                                                           captureNumericValue, traceAllocation, entryIdMap_,
                                                           GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            snapshot->BuildUp();
            return snapshot;
        }
        case SampleType::REAL_TIME: {
            auto *snapshot = GetChunk()->New<HeapSnapshot>(vm_, GetEcmaStringTable(), isVmMode, isPrivate,
                                                           captureNumericValue, traceAllocation, entryIdMap_,
                                                           GetChunk());
            if (snapshot == nullptr) {
                LOG_FULL(FATAL) << "alloc snapshot failed";
                UNREACHABLE();
            }
            AddSnapshot(snapshot);
            snapshot->PrepareSnapshot();
            return snapshot;
        }
        default:
            return nullptr;
    }
}

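// Take ownership of a snapshot; once MAX_NUM_HPROF snapshots are stored, the existing ones are
// released before the new one is added.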
void HeapProfiler::AddSnapshot(HeapSnapshot *snapshot)
{
    if (hprofs_.size() >= MAX_NUM_HPROF) {
        ClearSnapshot();
    }
    ASSERT(snapshot != nullptr);
    hprofs_.emplace_back(snapshot);
}

void HeapProfiler::ClearSnapshot()
{
    for (auto *snapshot : hprofs_) {
        GetChunk()->Delete(snapshot);
    }
    hprofs_.clear();
}

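// Start sampling-based allocation profiling; only one sampling session may run at a time.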
bool HeapProfiler::StartHeapSampling(uint64_t samplingInterval, int stackDepth)
{
    if (heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Do not start heap sampling twice in a row.";
        return false;
    }
    heapSampling_ = std::make_unique<HeapSampling>(vm_, const_cast<Heap *>(vm_->GetHeap()),
                                                   samplingInterval, stackDepth);
    return true;
}

void HeapProfiler::StopHeapSampling()
{
    heapSampling_.reset();
}

const struct SamplingInfo *HeapProfiler::GetAllocationProfile()
{
    if (!heapSampling_.get()) {
        LOG_ECMA(ERROR) << "Heap sampling has not been started, please start it first.";
        return nullptr;
    }
    return heapSampling_->GetAllocationProfile();
}
} // namespace panda::ecmascript