1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "stack_preprocess.h"
17
18 #include <elf.h>
19 #include <unistd.h>
20
21 #include "common.h"
22 #include "logging.h"
23 #include "plugin_service_types.pb.h"
24 #include "dfx_elf.h"
25 #include "utilities.h"
26 #include "native_hook_result_standard.pb.h"
27 #include "native_hook_config_standard.pb.h"
28 #include "google/protobuf/text_format.h"
29 #include "trace_file_writer.h"
30
31
// jemalloc size-class parameters (mirrored from jemalloc); used by
// ComputeAlign() to reproduce the allocator's size rounding for statistics.
constexpr static uint32_t SC_LG_TINY_MIN = 3;
constexpr static uint32_t LG_QUANTUM = 4;
constexpr static uint32_t SC_NTINY = LG_QUANTUM - SC_LG_TINY_MIN;
// NOTE(review): with the current values this is LG_QUANTUM - 1 == 3; if
// LG_QUANTUM were ever <= SC_LG_TINY_MIN the -1 branch would wrap to
// UINT32_MAX — confirm intent before changing either constant.
constexpr static uint32_t SC_LG_TINY_MAXCLASS = (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1);
constexpr static uint32_t SC_LG_NGROUP = 2;
constexpr static uint32_t LG_SIZE_CLASS_GROUP = 2;
constexpr static uint32_t NTBINS = 1;
constexpr static uint32_t LG_TINY_MAXCLASS = 3;
// Upper bounds and cadence knobs for matching, logging, stopping and batching.
constexpr static uint32_t MAX_MATCH_CNT = 1000;       // max malloc/free matching count
constexpr static uint32_t MAX_MATCH_INTERVAL = 3600;  // max matching interval, seconds
constexpr static uint32_t LOG_PRINT_TIMES = 10000;    // emit a log line every N events
constexpr static uint32_t WAIT_STOP_TIME = 5000;      // ForceStop watchdog delay, ms
constexpr static uint32_t WAIT_TIME_ONCE = 10;        // stop-poll sleep step, ms
constexpr static uint32_t MAX_BATCH_CNT = 40;         // raw stacks taken per batch
constexpr static uint32_t RIGHT_MOVE_1 = 1;
constexpr static uint32_t RIGHT_MOVE_2 = 2;
constexpr static uint32_t RIGHT_MOVE_4 = 4;
constexpr static uint32_t RIGHT_MOVE_8 = 8;
constexpr static uint32_t RIGHT_MOVE_16 = 16;
// High-bit tags folded into callstack entries so the trace parser can
// distinguish sizes / JS frame ids from real native instruction pointers.
constexpr static uint64_t SIZE_MASK = 0xFFFFFF0000000000;
constexpr static uint64_t JS_OFFLINE_IP_MASK = 0xFFFFFE0000000000;
// NOTE(review): these two synthetic ids share the value 999999 on purpose?
// They are used for distinct things (dwarf failure vs napi callback frames).
constexpr static uint64_t DWARF_ERROR_ID = 999999;
constexpr static uint64_t DWARF_NAPI_CALLBACK = 999999;
// Made const: the separators are fixed protocol tokens, never mutated.
static const std::string JS_CALL_STACK_DEPTH_SEP = ","; // ',' is js call stack depth separator
static const std::string JS_SYMBOL_FILEPATH_SEP = "|"; // '|' is js symbol and filepath separator
constexpr static int NAPI_CALL_STACK = 2; // just for napi call stack
constexpr static uint32_t FRAME_DEPTH = 2; // add two frames
#ifdef PERFORMANCE_DEBUG
constexpr static uint32_t LONG_TIME_THRESHOLD = 1000000;
static std::atomic<uint64_t> timeCost = 0;
static std::atomic<uint64_t> unwindTimes = 0;
#endif
64
65 using namespace OHOS::Developtools::NativeDaemon;
66 using namespace OHOS::HiviewDFX;
67 using namespace OHOS::Developtools::Profiler;
68
// Builds the preprocessing pipeline for one native-hook session.
// Owns a VirtualRuntime used for stack unwinding, clamps the malloc/free
// matching config to supported maxima, pre-sizes the statistics containers,
// and resolves the clock used for incoming hook data.
// @param dataRepeater       queue the hooked process pushes raw stacks into
// @param hookConfig         session configuration (copied; values may be clamped)
// @param pluginDataClockId  clock id used for outgoing plugin-data timestamps
// @param fpHookData         optional FILE* target when save_file() is set
// @param isHookStandalone   enables standalone (text) serialization
// @param isSaService        running inside the SA service (writer_ is a TraceFileWriter)
// @param isProtobufSerialize use classic protobuf instead of ProtoEncoder
StackPreprocess::StackPreprocess(const StackDataRepeaterPtr& dataRepeater, const NativeHookConfig& hookConfig,
    clockid_t pluginDataClockId, FILE* fpHookData, bool isHookStandalone, bool isSaService, bool isProtobufSerialize)
    : dataRepeater_(dataRepeater), hookConfig_(hookConfig), pluginDataClockId_(pluginDataClockId),
      fpHookData_(fpHookData), isHookStandaloneSerialize_(isHookStandalone), isSaService_(isSaService),
      isProtobufSerialize_(isProtobufSerialize)
{
    runtime_instance = std::make_shared<VirtualRuntime>(hookConfig_);

    // Clamp user-supplied matching knobs to their supported maxima.
    if (hookConfig_.malloc_free_matching_interval() > MAX_MATCH_INTERVAL) {
        PROFILER_LOG_INFO(LOG_CORE, "Not support set %d", hookConfig_.malloc_free_matching_interval());
        hookConfig_.set_malloc_free_matching_interval(MAX_MATCH_INTERVAL);
    }

    if (hookConfig_.malloc_free_matching_cnt() > MAX_MATCH_CNT) {
        PROFILER_LOG_INFO(LOG_CORE, "Not support set %d", hookConfig_.malloc_free_matching_cnt());
        hookConfig_.set_malloc_free_matching_cnt(MAX_MATCH_CNT);
    }
    PROFILER_LOG_INFO(LOG_CORE, "malloc_free_matching_interval = %d malloc_free_matching_cnt = %d\n",
        hookConfig_.malloc_free_matching_interval(), hookConfig_.malloc_free_matching_cnt());

    // Statistics mode: pre-size the maps to avoid rehashing on the hot path.
    if (hookConfig_.statistics_interval() > 0) {
        statisticsInterval_ = std::chrono::seconds(hookConfig_.statistics_interval());
        recordStatisticsMap_.reserve(STATISTICS_MAP_SZIE);
        statisticsPeriodData_.reserve(STATISTICS_PERIOD_DATA_SIZE);
        allocAddrMap_.reserve(ALLOC_ADDRMAMP_SIZE);
    }
    if (hookConfig_.malloc_free_matching_interval() > 0) {
        applyAndReleaseMatchInterval_ = std::chrono::seconds(hookConfig_.malloc_free_matching_interval());
        applyAndReleaseMatchIntervallMap_.reserve(MATCH_ADDRMAMP_SIZE);
    }
    PROFILER_LOG_INFO(LOG_CORE, "statistics_interval = %d statisticsInterval_ = %lld \n",
        hookConfig_.statistics_interval(), statisticsInterval_.count());
    PROFILER_LOG_INFO(LOG_CORE, "applyAndReleaseMatchInterval_ = %lld", applyAndReleaseMatchInterval_.count());
    hookDataClockId_ = COMMON::GetClockId(hookConfig_.clock());
    PROFILER_LOG_INFO(LOG_CORE, "StackPreprocess(): pluginDataClockId = %d hookDataClockId = %d \n",
        pluginDataClockId_, hookDataClockId_);
    if (hookConfig_.save_file() && fpHookData_ == nullptr) {
        PROFILER_LOG_ERROR(LOG_CORE, "If you need to save the file, please set the file_name");
    }
    PROFILER_LOG_INFO(LOG_CORE, "isHookStandaloneSerialize_ = %d", isHookStandaloneSerialize_);
    // Size the register buffer for the target architecture.
#if defined(__arm__)
    u64regs_.resize(PERF_REG_ARM_MAX);
#else
    u64regs_.resize(PERF_REG_ARM64_MAX);
#endif
    // Worst case: native depth plus JS depth in a single unwound stack.
    callFrames_.reserve(hookConfig_.max_stack_depth() + hookConfig_.max_js_stack_depth());
    if (hookConfig_.fp_unwind() && hookConfig_.js_stack_report() > 0) {
        fpJsCallStacks_.reserve(hookConfig_.max_js_stack_depth());
    }
}
119
// Orderly shutdown: raise the stop flag first, then close the repeater so a
// blocked TakeResults() wakes up, then join the worker before members die.
StackPreprocess::~StackPreprocess()
{
    isStopTakeData_ = true;
    if (dataRepeater_) {
        dataRepeater_->Close();
    }
    if (thread_.joinable()) {
        thread_.join();
    }
    runtime_instance = nullptr;
    fpHookData_ = nullptr; // not owned here; the opener is responsible for fclose
}
132
FinishTraceFile()133 void StackPreprocess::FinishTraceFile()
134 {
135 if (isSaService_) {
136 std::shared_ptr<TraceFileWriter> tfPtr = std::static_pointer_cast<TraceFileWriter>(writer_);
137 tfPtr->SetDurationTime();
138 tfPtr->Finish();
139 }
140 }
141
SetWriter(const std::shared_ptr<Writer> & writer)142 void StackPreprocess::SetWriter(const std::shared_ptr<Writer>& writer)
143 {
144 writer_ = writer;
145 if (!isSaService_) {
146 stackData_ = BatchNativeHookData();
147 }
148 }
149
SetWriter(const WriterStructPtr & writer)150 void StackPreprocess::SetWriter(const WriterStructPtr& writer)
151 {
152 resultWriter_ = writer;
153 auto ctx = resultWriter_->startReport(resultWriter_);
154 if (ctx == nullptr) {
155 PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
156 return;
157 }
158 stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
159 }
160
161
StartTakeResults()162 bool StackPreprocess::StartTakeResults()
163 {
164 CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
165
166 std::thread demuxer([this] { this->TakeResults(); });
167 CHECK_TRUE(demuxer.get_id() != std::thread::id(), false, "demuxer thread invalid");
168
169 thread_ = std::move(demuxer);
170 isStopTakeData_ = false;
171 return true;
172 }
173
// Stops the consumer and waits for it to drain.
// A one-shot watchdog (ForceStop after WAIT_STOP_TIME ms) guarantees the
// wait below cannot hang forever: it flips isStopTakeData_ and closes the
// repeater, which unblocks TakeResults().
// @return true once the worker has stopped (or was never repeater-driven).
bool StackPreprocess::StopTakeResults()
{
    PROFILER_LOG_INFO(LOG_CORE, "start StopTakeResults");
    int32_t timerFd = scheduleTaskManager_.ScheduleTask(
        std::bind(&StackPreprocess::ForceStop, this), WAIT_STOP_TIME, true, false);
    if (timerFd == -1) {
        PROFILER_LOG_ERROR(LOG_CORE, "StopTakeResults ScheduleTask failed!");
        return false;
    }
    if (!dataRepeater_) {
        // Shared-memory mode has no repeater thread to join; poll in 10 ms
        // steps until the shmem consumer (or the watchdog) raises the flag.
        while (!isStopTakeData_) {
            std::this_thread::sleep_for(std::chrono::milliseconds(WAIT_TIME_ONCE));
        }
        return true;
    }
    CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
    CHECK_TRUE(thread_.get_id() != std::thread::id(), false, "thread invalid");

    PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join");

    if (thread_.joinable()) {
        thread_.join();
    }
    PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join success");
    return true;
}
200
IntervalFlushRecordStatistics()201 inline void StackPreprocess::IntervalFlushRecordStatistics()
202 {
203 // interval reporting statistics
204 if (hookConfig_.statistics_interval() > 0) {
205 auto currentTime = std::chrono::steady_clock::now();
206 auto elapsedTime = std::chrono::duration_cast<std::chrono::microseconds>(currentTime - lastStatisticsTime_);
207 if (elapsedTime >= statisticsInterval_) {
208 lastStatisticsTime_ = currentTime;
209 FlushRecordStatistics();
210 }
211 }
212 }
213
IntervalFlushApplyAndReleaseMatchData()214 inline void StackPreprocess::IntervalFlushApplyAndReleaseMatchData()
215 {
216 // interval reporting apply and release match data
217 if (hookConfig_.malloc_free_matching_interval() > 0) {
218 static auto lastStatisticsTime = std::chrono::steady_clock::now();
219 auto currentTime = std::chrono::steady_clock::now();
220 auto elapsedTime = std::chrono::duration_cast<std::chrono::seconds>(currentTime - lastStatisticsTime);
221 if (elapsedTime >= applyAndReleaseMatchInterval_) {
222 lastStatisticsTime = currentTime;
223 FlushRecordApplyAndReleaseMatchData();
224 }
225 }
226 }
227
// Handles events that carry no call stack.
// MMAP_FILE_TYPE: registers a file-backed mapping with the runtime so later
// unwinds can symbolize it. THREAD_NAME_MSG: records the tid->name mapping.
// @return true if the event was consumed here, false for stack-carrying types.
bool StackPreprocess::HandleNoStackEvent(RawStackPtr& rawData)
{
    if (rawData->stackConext->type == MMAP_FILE_TYPE) {
        BaseStackRawData* mmapRawData = rawData->stackConext;
        // Payload after the header is the NUL-terminated mapped file path.
        std::string filePath(reinterpret_cast<char *>(rawData->data));
        // Rewrites the path into the target process's sandbox view.
        COMMON::AdaptSandboxPath(filePath, rawData->stackConext->pid);
        PROFILER_LOG_DEBUG(LOG_CORE, "MMAP_FILE_TYPE curMmapAddr=%p, MAP_FIXED=%d, "
            "PROT_EXEC=%d, offset=%" PRIu64 ", filePath=%s",
            mmapRawData->addr, mmapRawData->mmapArgs.flags & MAP_FIXED,
            mmapRawData->mmapArgs.flags & PROT_EXEC, mmapRawData->mmapArgs.offset, filePath.data());
        // mtx_ guards runtime_instance's map table (shared with the unwinder).
        std::lock_guard<std::mutex> guard(mtx_);
        runtime_instance->HandleMapInfo({reinterpret_cast<uint64_t>(mmapRawData->addr),
            mmapRawData->mallocSize, mmapRawData->mmapArgs.flags, mmapRawData->mmapArgs.offset}, filePath,
            rawData->stackConext->pid, rawData->stackConext->tid);
        flushBasicData_ = true; // new mapping -> re-emit maps for offline symbolization
    } else if (rawData->stackConext->type == THREAD_NAME_MSG) {
        std::string threadName = reinterpret_cast<char*>(rawData->data);
        ReportThreadNameMap(rawData->stackConext->tid, threadName);
    } else {
        return false;
    }
    return true;
}
251
ForceStop()252 void StackPreprocess::ForceStop()
253 {
254 isStopTakeData_ = true;
255 if (dataRepeater_ != nullptr) {
256 dataRepeater_->Close();
257 }
258 }
259
// Shared-memory consumer loop: drains raw records directly from the shmem
// block (no repeater). Each TakeData callback sees one record:
//   - sizeof(uint64_t) payload  -> a bare free address (statistics mode)
//   - header + payload          -> typed BaseStackRawData event
// Runs until isStopTakeData_ is set or an END_MSG arrives.
void StackPreprocess::TakeResultsFromShmem(const std::shared_ptr<EventNotifier>& eventNotifier,
    const std::shared_ptr<ShareMemoryBlock>& shareMemoryBlock)
{
    eventNotifier->Take();
    // rawData aliases a stack-local RawStack via a no-op deleter: the
    // shared_ptr only exists so helpers share the RawStackPtr type. It must
    // never escape this function.
    StackDataRepeater::RawStack rawStack;
    RawStackPtr rawData(&rawStack, [](StackDataRepeater::RawStack* del) {});
    while (!isStopTakeData_) {
        bool ret = shareMemoryBlock->TakeData(
            [&](const int8_t data[], uint32_t size) -> bool {
#ifdef PERFORMANCE_DEBUG
                struct timespec start = {};
                clock_gettime(CLOCK_REALTIME, &start);
#endif
                // Fast path: an 8-byte record is just a freed address.
                if (size == sizeof(uint64_t)) {
                    uint64_t addr = *reinterpret_cast<uint64_t *>(const_cast<int8_t *>(data));
                    SetFreeStatisticsData(addr);
#ifdef PERFORMANCE_DEBUG
                    struct timespec end = {};
                    clock_gettime(CLOCK_REALTIME, &end);
                    // MAX_MATCH_CNT^3 == 10^9: seconds -> nanoseconds.
                    uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
                        (end.tv_nsec - start.tv_nsec);
                    timeCost += curTimeCost;
                    unwindTimes++;
                    if (unwindTimes % LOG_PRINT_TIMES == 0) {
                        PROFILER_LOG_ERROR(LOG_CORE,
                            "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
                            unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
                    }
#endif
                    return true;
                }
                CHECK_TRUE(size >= sizeof(BaseStackRawData), false, "stack data invalid!");
                // Views into the shmem buffer — valid only inside this callback.
                rawData->stackConext = reinterpret_cast<BaseStackRawData *>(const_cast<int8_t *>(data));
                rawData->data = reinterpret_cast<uint8_t*>(const_cast<int8_t *>(data)) + sizeof(BaseStackRawData);
                rawData->fpDepth = (size - sizeof(BaseStackRawData)) / sizeof(uint64_t);
                if (isStopTakeData_) {
                    return false;
                } else if (rawData->stackConext->type == MEMORY_TAG) {
                    std::string tagName = reinterpret_cast<char*>(rawData->data);
                    SaveMemTag(rawData->stackConext->tagId, tagName);
                    return true;
                } else if (HandleNoStackEvent(rawData)) {
                    return true;
                } else if (rawData->stackConext->type == MUNMAP_MSG) {
                    std::lock_guard<std::mutex> guard(mtx_);
                    runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
                } else if (rawData->stackConext->type == NMD_MSG) {
                    // NMD text is appended verbatim to the nmd output fd.
                    const char* nmdResult = reinterpret_cast<const char*>(rawData->data);
                    lseek(nmdFd_, 0, SEEK_END);
                    (void)write(nmdFd_, nmdResult, strlen(nmdResult));
                    return true;
                } else if (rawData->stackConext->type == END_MSG) {
                    isStopTakeData_ = true;
                    return true;
                }
                {
                    std::lock_guard<std::mutex> guard(mtx_);
                    runtime_instance->UpdateThread(rawData->stackConext->pid, rawData->stackConext->tid);
                }
                // Emit maps first so offline symbolization can resolve ips.
                ReportOfflineSymbolizationData();
                std::visit([&](auto& stackData) {
                    SetHookData(rawData, stackData);
                    FlushCheck(stackData);
                }, stackData_);
                IntervalFlushRecordStatistics();
#ifdef PERFORMANCE_DEBUG
                struct timespec end = {};
                clock_gettime(CLOCK_REALTIME, &end);
                uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
                    (end.tv_nsec - start.tv_nsec);
                if (curTimeCost >= LONG_TIME_THRESHOLD) {
                    PROFILER_LOG_ERROR(LOG_CORE, "bigTimeCost %" PRIu64 " event=%d fpDepth=%u",
                        curTimeCost, rawData->stackConext->type, rawData->fpDepth);
                }
                timeCost += curTimeCost;
                unwindTimes++;
                if (unwindTimes % LOG_PRINT_TIMES == 0) {
                    PROFILER_LOG_ERROR(LOG_CORE, "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
                        unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
                }
#endif
                return true;
            });
        if (!ret) {
            break;
        }
    }
}
348
// Repeater consumer loop (worker-thread entry point): takes raw stacks in
// batches of up to MAX_BATCH_CNT, unwinds them, and routes the results to
// file / statistics / protobuf serialization according to hookConfig_.
// Exits when the repeater closes, the stop flag rises, or END_MSG arrives.
void StackPreprocess::TakeResults()
{
    if (!dataRepeater_) {
        return;
    }

    // Reduced-depth unwinding for low-priority records: the smaller of
    // MIN_STACK_DEPTH and the configured max — unless 'blocked' mode forces
    // the full configured depth.
    size_t minStackDepth = hookConfig_.max_stack_depth() > MIN_STACK_DEPTH
        ? MIN_STACK_DEPTH : hookConfig_.max_stack_depth();
    if (hookConfig_.blocked()) {
        minStackDepth = static_cast<size_t>(hookConfig_.max_stack_depth());
    }
    minStackDepth += FILTER_STACK_DEPTH;
    PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, start!", gettid());
    while (1) {
        RawStackPtr batchRawStack[MAX_BATCH_CNT] = {nullptr};
        if (isStopTakeData_) {
            break;
        }
        // In statistics mode, bound the wait so a flush still happens when
        // the queue is idle: wait only until the next interval boundary.
        uint32_t during = 0;
        if (hookConfig_.statistics_interval() > 0) {
            auto currentTime = std::chrono::steady_clock::now();
            auto timeDiff = std::chrono::duration_cast<std::chrono::milliseconds>(currentTime - lastStatisticsTime_);
            int tempDuring =
                std::chrono::duration_cast<std::chrono::milliseconds>(statisticsInterval_).count() - timeDiff.count();
            during = tempDuring > 0 ? static_cast<uint32_t>(tempDuring) : 0;
        }
        bool isTimeOut = false;
        auto result = dataRepeater_->TakeRawData(during, hookDataClockId_, MAX_BATCH_CNT, batchRawStack,
            hookConfig_.statistics_interval(), isTimeOut);
        if (hookConfig_.statistics_interval() > 0 && isTimeOut && result == nullptr) { // statistics mode
            IntervalFlushRecordStatistics();
            continue;
        }
        if (!result) {
            // Repeater closed: shut the loop down.
            break;
        }
        for (unsigned int i = 0; i < MAX_BATCH_CNT; i++) {
            auto rawData = batchRawStack[i];
            if (!rawData || isStopTakeData_) {
                break;
            }
            // Headerless record: a bare free address in statistics mode.
            if (rawData->baseStackData == nullptr) {
                if (rawData->freeData) {
                    SetFreeStatisticsData(rawData->freeData);
                }
                dataRepeater_->ReturnRawStack(rawData);
                continue;
            }
            if (rawData->stackConext->type == NMD_MSG) {
                dataRepeater_->ReturnRawStack(rawData);
                continue;
            } else if (rawData->stackConext->type == END_MSG) {
                isStopTakeData_ = true;
                break;
            }
#ifdef PERFORMANCE_DEBUG
            struct timespec start = {};
            clock_gettime(CLOCK_REALTIME, &start);
#endif
            if (HandleNoStackEvent(rawData)) {
                dataRepeater_->ReturnRawStack(rawData);
                continue;
            } else if (rawData->stackConext->type == MUNMAP_MSG) {
                std::lock_guard<std::mutex> guard(mtx_);
                runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
            }

            // Records flagged not-reportable are counted and dropped.
            if (!rawData->reportFlag) {
                ignoreCnts_++;
                if (ignoreCnts_ % LOG_PRINT_TIMES == 0) {
                    PROFILER_LOG_INFO(LOG_CORE, "ignoreCnts_ = %d quene size = %zu\n",
                        ignoreCnts_, dataRepeater_->Size());
                }
                dataRepeater_->ReturnRawStack(rawData);
                continue;
            }
            eventCnts_++;
            if (eventCnts_ % LOG_PRINT_TIMES == 0) {
                PROFILER_LOG_INFO(LOG_CORE, "eventCnts_ = %d quene size = %zu\n", eventCnts_, dataRepeater_->Size());
            }
            callFrames_.clear();
            if (hookConfig_.fp_unwind()) {
                // Frame-pointer mode: ips are already in the record.
                FillFpNativeIp(rawData);
                if (rawData->stackConext->jsChainId > 0 && rawData->jsStackData && hookConfig_.js_stack_report() > 0) {
                    FillFpJsData(rawData);
                }
            } else if (rawData->stackConext->type != PR_SET_VMA_MSG) {
                // Dwarf mode: an empty stack means the client-side capture
                // failed — substitute the synthetic error frame; otherwise
                // load the captured register set for the unwinder.
                if (rawData->stackSize == 0) {
                    FillDwarfErrorStack();
                } else {
#if defined(__arm__)
                    uint32_t *regAddrArm = reinterpret_cast<uint32_t *>(rawData->data);
                    u64regs_.assign(regAddrArm, regAddrArm + PERF_REG_ARM_MAX);
#else
                    if (memcpy_s(u64regs_.data(), sizeof(uint64_t) * PERF_REG_ARM64_MAX, rawData->data,
                        sizeof(uint64_t) * PERF_REG_ARM64_MAX) != EOK) {
                        PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s regs failed");
                    }
#endif
                }
            }
#ifdef PERFORMANCE_DEBUG
            size_t realFrameDepth = callFrames_.size();
#endif
            size_t stackDepth = ((size_t)hookConfig_.max_stack_depth() > MAX_CALL_FRAME_UNWIND_SIZE)
                ? MAX_CALL_FRAME_UNWIND_SIZE
                : hookConfig_.max_stack_depth() + FILTER_STACK_DEPTH;
            if (rawData->reduceStackFlag) {
                stackDepth = minStackDepth;
            }
            if ((hookConfig_.fp_unwind()) || rawData->stackSize > 0) {
                std::lock_guard<std::mutex> guard(mtx_);
                if (rawData->stackConext->type != PR_SET_VMA_MSG) {
                    bool ret = runtime_instance->UnwindStack(u64regs_, rawData->stackData, rawData->stackSize,
                        rawData->stackConext->pid, rawData->stackConext->tid, callFrames_, stackDepth);
                    if (!ret) {
                        PROFILER_LOG_ERROR(LOG_CORE, "unwind fatal error");
                        dataRepeater_->ReturnRawStack(rawData);
                        continue;
                    }
                }
            }
            if ((hookConfig_.fp_unwind()) || rawData->stackSize > 0) {
                ReportOfflineSymbolizationData();
            }
            // Route the unwound frames per output configuration.
            std::visit([&](auto& stackData) {
                if (hookConfig_.save_file() && hookConfig_.file_name() != "" && isHookStandaloneSerialize_) {
                    SetHookData(rawData, callFrames_, stackData);
                } else if (hookConfig_.save_file() && hookConfig_.file_name() != "") {
                    WriteFrames(rawData, callFrames_);
                } else if (!hookConfig_.save_file()) {
                    if (hookConfig_.malloc_free_matching_interval() > 0) {
                        SetApplyAndReleaseMatchFrame(rawData, callFrames_, stackData);
                    } else {
                        SetHookData(rawData, callFrames_, stackData);
                    }
                }
            }, stackData_);

#ifdef PERFORMANCE_DEBUG
            struct timespec end = {};
            clock_gettime(CLOCK_REALTIME, &end);
            // MAX_MATCH_CNT^3 == 10^9: seconds -> nanoseconds.
            uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
                (end.tv_nsec - start.tv_nsec);
            if (curTimeCost >= LONG_TIME_THRESHOLD) {
                PROFILER_LOG_ERROR(LOG_CORE, "bigTimeCost %" PRIu64 " event=%d, realFrameDepth=%zu, "
                    "callFramesDepth=%zu\n",
                    curTimeCost, rawData->stackConext->type, realFrameDepth, callFrames_.size());
            }
            timeCost += curTimeCost;
            unwindTimes++;
            if (unwindTimes % LOG_PRINT_TIMES == 0) {
                PROFILER_LOG_ERROR(LOG_CORE, "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
                    unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
            }
#endif
            dataRepeater_->ReturnRawStack(rawData);
        } // for
        if (hookConfig_.save_file() && hookConfig_.file_name() != "" && !isHookStandaloneSerialize_) {
            continue;
        }
        if (hookConfig_.statistics_interval() == 0) {
            std::visit([&](auto& stackData) {
                FlushCheck(stackData);
            }, stackData_);
        }
        IntervalFlushRecordStatistics();
        IntervalFlushApplyAndReleaseMatchData();
    } // while
    PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, exit!", gettid());
}
520
ReportThreadNameMap(uint32_t tid,const std::string & tname)521 inline void StackPreprocess::ReportThreadNameMap(uint32_t tid, const std::string& tname)
522 {
523 std::lock_guard<std::mutex> guard(mtx_);
524 auto it = threadNameMap_.find(tid);
525 if (it == threadNameMap_.end() || it->second != tname) {
526 threadNameMap_[tid] = tname;
527 std::visit([&](auto& stackData) {
528 auto hookData = stackData.add_events();
529 auto thread = hookData->mutable_thread_name_map();
530 thread->set_id(tid);
531 thread->set_name(tname);
532 thread->set_pid(pid_);
533 FlushCheck(stackData);
534 }, stackData_);
535 }
536 }
537
538 template <typename T>
FillOfflineCallStack(std::vector<CallFrame> & callFrames,size_t idx,T & stackData)539 inline void StackPreprocess::FillOfflineCallStack(std::vector<CallFrame>& callFrames, size_t idx, T& stackData)
540 {
541 for (; idx < callFrames.size(); ++idx) {
542 if (callFrames[idx].isJsFrame_) {
543 ReportFrameMap(callFrames[idx], stackData);
544 callStack_.push_back(callFrames[idx].callFrameId_ | JS_OFFLINE_IP_MASK);
545 continue;
546 }
547 callStack_.push_back(callFrames[idx].ip_);
548 }
549 }
550
551 template <typename T>
FillCallStack(std::vector<CallFrame> & callFrames,size_t idx,T & stackData)552 inline void StackPreprocess::FillCallStack(std::vector<CallFrame>& callFrames, size_t idx, T& stackData)
553 {
554 for (; idx < callFrames.size(); ++idx) {
555 ReportFrameMap(callFrames[idx], stackData);
556 // for call stack id
557 callStack_.push_back(callFrames[idx].callFrameId_);
558 }
559 }
560
FindCallStackId(std::vector<uint64_t> & callStack)561 inline uint32_t StackPreprocess::FindCallStackId(std::vector<uint64_t>& callStack)
562 {
563 if (hookConfig_.response_library_mode()) {
564 auto itStack = responseLibraryMap_.find(callStack[0]);
565 if (itStack != responseLibraryMap_.end()) {
566 return itStack->second;
567 }
568 } else {
569 auto itStack = callStackMap_.find(callStack);
570 if (itStack != callStackMap_.end()) {
571 return itStack->second;
572 }
573 }
574 return 0;
575 }
576
577 /**
578 * @return '0' is invalid stack id, '> 0' is valid stack id
579 */
580 template <typename T>
SetCallStackMap(T & stackData)581 inline uint32_t StackPreprocess::SetCallStackMap(T& stackData)
582 {
583 uint32_t stackId = 0;
584 auto hookData = stackData.add_events();
585 auto stackmap = hookData->mutable_stack_map();
586 if (hookConfig_.response_library_mode()) {
587 stackId = responseLibraryMap_.size() + 1;
588 } else {
589 stackId = callStackMap_.size() + 1;
590 }
591 stackmap->set_id(stackId);
592 // offline symbolization use ip, other use frame_map_id
593 if (hookConfig_.offline_symbolization()) {
594 if constexpr (std::is_same<T, ProtoEncoder::BatchNativeHookData>::value) {
595 stackmap->add_ip(callStack_);
596 } else {
597 for (size_t i = 0; i < callStack_.size(); i++) {
598 stackmap->add_ip(callStack_[i]);
599 }
600 }
601 } else {
602 if constexpr (std::is_same<T, ProtoEncoder::BatchNativeHookData>::value) {
603 stackmap->add_frame_map_id(callStack_);
604 } else {
605 for (size_t i = 0; i < callStack_.size(); i++) {
606 stackmap->add_frame_map_id(callStack_[i]);
607 }
608 }
609 }
610 stackmap->set_pid(pid_);
611 if (hookConfig_.response_library_mode()) {
612 responseLibraryMap_[callStack_[0]] = stackId;
613 } else {
614 callStackMap_[callStack_] = stackId;
615 }
616 return stackId;
617 }
618
/**
 * Resolves (or registers) the compressed call-stack id for one raw event.
 * Builds callStack_ from the unwound frames, optionally prepending a
 * synthetic napi frame, then looks the stack up in the cache.
 * @return '0' is invalid stack id, '> 0' is valid stack id
 */
template <typename T>
inline uint32_t StackPreprocess::GetCallStackId(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
    T& stackData)
{
    // ignore the first two frame if dwarf unwind
    size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
    // if free_stack_report or munmap_stack_report is false, don't need to record.
    if ((rawStack->stackConext->type == FREE_MSG) && !hookConfig_.free_stack_report()) {
        return 0;
    } else if ((rawStack->stackConext->type == MUNMAP_MSG) && !hookConfig_.munmap_stack_report()) {
        return 0;
    }
    callStack_.clear();
    bool isNapi = false;
    // NAPI mode: memory-using events whose tag contains "napi" get a
    // synthetic JS frame id pushed first (encoded with the offline-ip mask).
    if (rawStack->stackConext->type == MEMORY_USING_MSG && hookConfig_.js_stack_report() == NAPI_CALL_STACK) {
        std::string tagName;
        GetMemTag(rawStack->stackConext->tagId, tagName);
        if (tagName.find("napi") != std::string::npos) {
            callStack_.reserve(callFrames.size() + 1); // 1 : insert a frame
            callStack_.push_back((DWARF_NAPI_CALLBACK + napiIndex_) | JS_OFFLINE_IP_MASK);
            isNapi = true;
        }
    } else {
        callStack_.reserve(callFrames.size());
    }
    if (!hookConfig_.offline_symbolization()) {
        FillCallStack(callFrames, idx, stackData);
    } else {
        // Dwarf capture failure (stackSize == 0): presumably only the
        // synthetic error frames are present, so skip nothing — confirm
        // against FillDwarfErrorStack.
        if ((!hookConfig_.fp_unwind()) && rawStack->stackSize == 0) {
            idx = 0;
        }
        FillOfflineCallStack(callFrames, idx, stackData);
    }
    if (isNapi) {
        // insert a frame
        std::string tagName;
        GetMemTag(rawStack->stackConext->tagId, tagName);
        FillNapiStack(tagName, callFrames, napiIndex_);
        ReportFrameMap(callFrames.back(), stackData);
        ++napiIndex_;
    }
    // return call stack id
    std::lock_guard<std::mutex> guard(mtx_);
    uint32_t stackId = FindCallStackId(callStack_);
    if (stackId > 0) {
        return stackId;
    } else {
        return SetCallStackMap(stackData);
    }
}
672
673 template <typename T>
SetEventFrame(const ReportEventBaseData & rawStack,T * event,uint32_t stackMapId,const std::string & type)674 void StackPreprocess::SetEventFrame(const ReportEventBaseData& rawStack,
675 T* event, uint32_t stackMapId, const std::string& type)
676 {
677 event->set_pid(pid_);
678 event->set_tid(rawStack.tid);
679 event->set_addr(rawStack.addr);
680 if constexpr (std::is_same<T, ::MmapEvent>::value || std::is_same<T, ProtoEncoder::MmapEvent>::value) {
681 event->set_type(type);
682 }
683
684 if constexpr (!std::is_same<T, ::FreeEvent>::value && !std::is_same<T, ProtoEncoder::FreeEvent>::value) {
685 auto size = static_cast<uint64_t>(rawStack.mallocSize);
686 #ifdef USE_JEMALLOC
687 if constexpr (std::is_same<T, ::AllocEvent>::value || std::is_same<T, ProtoEncoder::AllocEvent>::value) {
688 size = static_cast<uint64_t>(ComputeAlign(size));
689 }
690 #endif
691 event->set_size(size);
692 }
693 if (hookConfig_.callframe_compress() && stackMapId != 0) {
694 event->set_thread_name_id(rawStack.tid);
695 event->set_stack_id(stackMapId);
696 }
697 event->set_thread_name_id(rawStack.tid);
698 }
699
// Fills one serialized event directly from a raw stack record.
// When callframe compression applies (and a cached stack id exists) only the
// stack id is written; otherwise the full frame list is inlined.
// @param rawStack   raw hook record (header + unwound frames)
// @param callFrames unwound frames for this event
// @param event      target protobuf event message
// @param stackMapId cached stack id; 0 disables the compressed path
// @param type       mmap type string (only used for mmap events)
template <typename T>
void StackPreprocess::SetEventFrame(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
    T* event, uint32_t stackMapId, const std::string& type)
{
    // ignore the first two frame if dwarf unwind
    size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
    event->set_pid(rawStack->stackConext->pid);
    event->set_tid(rawStack->stackConext->tid);
    event->set_addr((uint64_t)rawStack->stackConext->addr);

    // Only mmap-type events carry a type string.
    if constexpr (std::is_same<T, ::MmapEvent>::value || std::is_same<T, ProtoEncoder::MmapEvent>::value) {
        event->set_type(type);
    }

    // Free events have no size; alloc events report the jemalloc-aligned size.
    if constexpr (!std::is_same<T, ::FreeEvent>::value && !std::is_same<T, ProtoEncoder::FreeEvent>::value) {
        auto size = static_cast<uint64_t>(rawStack->stackConext->mallocSize);
#ifdef USE_JEMALLOC
        if constexpr (std::is_same<T, ::AllocEvent>::value || std::is_same<T, ProtoEncoder::AllocEvent>::value) {
            size = static_cast<uint64_t>(ComputeAlign(size));
        }
#endif
        event->set_size(size);
    }

    if (hookConfig_.callframe_compress() && stackMapId != 0) {
        event->set_thread_name_id(rawStack->stackConext->tid);
        event->set_stack_id(stackMapId);
    } else {
        // Uncompressed path: inline every frame into the event.
        for (; idx < callFrames.size(); ++idx) {
            auto frame = event->add_frame_info();
            SetFrameInfo(*frame, callFrames[idx]);
        }
        event->set_thread_name_id(rawStack->stackConext->tid);
    }
}
735
FillNapiStack(std::string & tagName,std::vector<CallFrame> & callFrames,uint64_t napiIndex)736 void StackPreprocess::FillNapiStack(std::string& tagName, std::vector<CallFrame>& callFrames, uint64_t napiIndex)
737 {
738 #if defined(__aarch64__)
739 uintptr_t pacMask = 0xFFFFFF8000000000;
740 #else
741 uintptr_t pacMask = 0;
742 #endif
743 CallFrame& jsCallFrame = callFrames_.emplace_back(0 & (~pacMask));
744 jsCallFrame.symbolName_ = tagName;
745 jsCallFrame.isJsFrame_ = true;
746 jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
747 jsCallFrame.needReport_ |= SYMBOL_NAME_ID_REPORT;
748 jsCallFrame.needReport_ |= FILE_PATH_ID_REPORT;
749 jsCallFrame.callFrameId_ = DWARF_NAPI_CALLBACK + napiIndex;
750 jsCallFrame.symbolNameId_ = DWARF_NAPI_CALLBACK + napiIndex;
751 jsCallFrame.filePathId_ = DWARF_NAPI_CALLBACK + napiIndex;
752 jsCallFrame.filePath_ = "no-napi-file-path";
753 }
754
// Statistics-mode path for an allocation event with unwound frames: builds
// callStack_ as [size|SIZE_MASK, frames...] (optionally with a synthetic
// napi frame), then updates the per-stack-id allocation statistics.
template <typename T>
void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
    T& stackData)
{
    // ignore the first two frame if dwarf unwind
    size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
    callStack_.clear();
    bool isNapi = false;
    // NAPI mode: events whose tag contains "napi" get a synthetic JS frame
    // id pushed first (encoded with the offline-ip mask).
    if (hookConfig_.js_stack_report() == NAPI_CALL_STACK) {
        std::string tagName;
        GetMemTag(rawStack->stackConext->tagId, tagName);
        if (tagName.find("napi") != std::string::npos) {
            callStack_.reserve(callFrames.size() + FRAME_DEPTH); // insert a frame
            callStack_.push_back((DWARF_NAPI_CALLBACK + napiIndex_) | JS_OFFLINE_IP_MASK);
            isNapi = true;
        }
    } else {
        callStack_.reserve(callFrames.size() + 1);
    }
    // First entry carries the allocation size, tagged so the parser can
    // distinguish it from a real ip.
    callStack_.push_back(rawStack->stackConext->mallocSize | SIZE_MASK);
    if (!hookConfig_.offline_symbolization()) {
        FillCallStack(callFrames, idx, stackData);
    } else {
        FillOfflineCallStack(callFrames, idx, stackData);
    }
    // insert a frame
    if (isNapi) {
        std::string tagName;
        GetMemTag(rawStack->stackConext->tagId, tagName);
        FillNapiStack(tagName, callFrames, napiIndex_);
        ReportFrameMap(callFrames.back(), stackData);
        ++napiIndex_;
    }
    std::lock_guard<std::mutex> guard(mtx_);
    // by call stack id set alloc statistics data.
    uint32_t stackId = FindCallStackId(callStack_);
    if (stackId > 0) {
        SetAllocStatisticsData(rawStack, stackId, true);
    } else {
        // First sighting of this stack: register it and flag a flush.
        stackId = SetCallStackMap(stackData);
        statisticsModelFlushCallstack_ = true;
        SetAllocStatisticsData(rawStack, stackId);
    }
}
799
// Fast statistics path for fp-unwind mode: the raw frame-pointer ips are
// copied verbatim (no symbolization) behind a size-tagged slot 0, then matched
// against the call-stack dedup table under mtx_.
template <typename T>
void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack, T& stackData)
{
    // Slot 0 encodes the allocation size; slots 1..fpDepth hold the raw ips.
    callStack_.resize(rawStack->fpDepth + 1);
    callStack_[0] = (rawStack->stackConext->mallocSize | SIZE_MASK);
    if (memcpy_s(callStack_.data() + 1, sizeof(uint64_t) * rawStack->fpDepth,
        rawStack->data, sizeof(uint64_t) * rawStack->fpDepth) != EOK) {
        PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s callStack_ failed");
        return;
    }
    std::lock_guard<std::mutex> guard(mtx_);
    // by call stack id set alloc statistics data.
    uint32_t stackId = FindCallStackId(callStack_);
    if (stackId > 0) {
        // Known stack: only update its statistics record.
        SetAllocStatisticsData(rawStack, stackId, true);
    } else {
        // New stack: register it and mark the callstack table dirty for flush.
        stackId = SetCallStackMap(stackData);
        statisticsModelFlushCallstack_ = true;
        SetAllocStatisticsData(rawStack, stackId);
    }
}
821
822 template <typename T>
SetHookData(RawStackPtr rawStack,T & stackData)823 void StackPreprocess::SetHookData(RawStackPtr rawStack, T& stackData)
824 {
825 if (hookConfig_.statistics_interval() > 0) {
826 // statistical reporting must is compressed and accurate.
827 switch (rawStack->stackConext->type) {
828 case FREE_MSG:
829 case MUNMAP_MSG:
830 case MEMORY_UNUSING_MSG: {
831 SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
832 break;
833 }
834 case MALLOC_MSG:
835 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
836 case MMAP_MSG:
837 case MMAP_FILE_PAGE_MSG:
838 case MEMORY_USING_MSG: {
839 SetAllocStatisticsFrame(rawStack, stackData);
840 break;
841 }
842 case PR_SET_VMA_MSG: {
843 break;
844 }
845 default: {
846 PROFILER_LOG_ERROR(LOG_CORE, "statistics event type: error");
847 break;
848 }
849 }
850 return;
851 }
852 }
853
ReportOfflineSymbolizationData()854 void StackPreprocess::ReportOfflineSymbolizationData()
855 {
856 if (hookConfig_.offline_symbolization() && flushBasicData_) {
857 SetMapsInfo();
858 flushBasicData_ = false;
859 }
860 }
861
// Apply/release matching mode: allocation events are queued and cancelled out
// by a matching release on the same address within the reporting period, so
// only unmatched events survive to FlushRecordApplyAndReleaseMatchData().
template <typename T>
void StackPreprocess::SetApplyAndReleaseMatchFrame(RawStackPtr rawStack, std::vector<CallFrame>& callFrames,
    T& stackData)
{
    uint32_t stackMapId = 0;
    if (rawStack->stackConext->type != PR_SET_VMA_MSG) {
        stackMapId = GetCallStackId(rawStack, callFrames, stackData);
    } else {
        // Tag event: remember the VMA name locally (indexed by tagId) and
        // queue the event itself; it never participates in matching.
        rawStack->stackConext->tagId = prctlPeriodTags_.size();
        prctlPeriodTags_.emplace_back(reinterpret_cast<char*>(rawStack->data));
        applyAndReleaseMatchPeriodListData_.emplace_back(rawStack->stackConext);
    }
    if (rawStack->stackConext->type == MALLOC_MSG) {
        // Normalize malloc sizes to the allocator's size class.
        rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
    } else if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
        return;
    }
    uint64_t addr = reinterpret_cast<uint64_t>(rawStack->stackConext->addr);
    auto iter = applyAndReleaseMatchIntervallMap_.find(addr);
    if (iter != applyAndReleaseMatchIntervallMap_.end()) {
        // A queued event already exists for this address: the pair cancels
        // out, so drop the queued entry (std::list iterators stay valid).
        applyAndReleaseMatchPeriodListData_.erase(iter->second);
        applyAndReleaseMatchIntervallMap_.erase(addr);
    } else {
        // No match yet: queue this event and index it by address.
        applyAndReleaseMatchPeriodListData_.emplace_back(rawStack->stackConext, stackMapId);
        applyAndReleaseMatchIntervallMap_.emplace(addr, std::prev(applyAndReleaseMatchPeriodListData_.end()));
    }
}
889
890 template <typename T>
SetHookData(RawStackPtr rawStack,std::vector<CallFrame> & callFrames,T & stackData)891 void StackPreprocess::SetHookData(RawStackPtr rawStack, std::vector<CallFrame>& callFrames, T& stackData)
892 {
893 // statistical reporting must is compressed and accurate.
894 if (hookConfig_.statistics_interval() > 0) {
895 switch (rawStack->stackConext->type) {
896 case FREE_MSG:
897 case MUNMAP_MSG:
898 case MEMORY_UNUSING_MSG: {
899 SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
900 break;
901 }
902 case MALLOC_MSG:
903 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
904 case MMAP_MSG:
905 case MMAP_FILE_PAGE_MSG:
906 case MEMORY_USING_MSG: {
907 SetAllocStatisticsFrame(rawStack, callFrames, stackData);
908 break;
909 }
910 case PR_SET_VMA_MSG: {
911 break;
912 }
913 default: {
914 PROFILER_LOG_ERROR(LOG_CORE, "statistics event type:%d error", rawStack->stackConext->type);
915 break;
916 }
917 }
918 return;
919 }
920
921 uint32_t stackMapId = 0;
922 if (hookConfig_.callframe_compress() &&
923 !(rawStack->stackConext->type == MEMORY_TAG || rawStack->stackConext->type == PR_SET_VMA_MSG)) {
924 stackMapId = GetCallStackId(rawStack, callFrames, stackData);
925 }
926
927 if ((!hookConfig_.callframe_compress() || stackMapId == 0) && hookConfig_.string_compressed()) {
928 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
929 for (; idx < callFrames.size(); ++idx) {
930 ReportSymbolNameMap(callFrames[idx], stackData);
931 ReportFilePathMap(callFrames[idx], stackData);
932 }
933 }
934
935 auto hookData = stackData.add_events();
936 hookData->set_tv_sec(rawStack->stackConext->ts.tv_sec);
937 hookData->set_tv_nsec(rawStack->stackConext->ts.tv_nsec);
938
939 if (rawStack->stackConext->type == MALLOC_MSG) {
940 auto allocEvent = hookData->mutable_alloc_event();
941 SetEventFrame(rawStack, callFrames, allocEvent, stackMapId);
942 } else if (rawStack->stackConext->type == FREE_MSG) {
943 auto freeEvent = hookData->mutable_free_event();
944 SetEventFrame(rawStack, callFrames, freeEvent, stackMapId);
945 } else if (rawStack->stackConext->type == MMAP_MSG) {
946 auto mmapEvent = hookData->mutable_mmap_event();
947 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId);
948 } else if (rawStack->stackConext->type == MMAP_FILE_PAGE_MSG) {
949 auto mmapEvent = hookData->mutable_mmap_event();
950 const std::string prefix = "FilePage:";
951 std::string tagName;
952 if (GetMemTag(rawStack->stackConext->tagId, tagName)) {
953 tagName = prefix + tagName;
954 }
955 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId, tagName);
956 } else if (rawStack->stackConext->type == MUNMAP_MSG) {
957 auto munmapEvent = hookData->mutable_munmap_event();
958 SetEventFrame(rawStack, callFrames, munmapEvent, stackMapId);
959 } else if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
960 auto tagEvent = hookData->mutable_tag_event();
961 const std::string prefix = "Anonymous:";
962 std::string tagName(reinterpret_cast<char*>(rawStack->data));
963 tagEvent->set_addr((uint64_t)rawStack->stackConext->addr);
964 tagEvent->set_size(rawStack->stackConext->mallocSize);
965 tagEvent->set_tag(prefix + tagName);
966 tagEvent->set_pid(pid_);
967 } else if (rawStack->stackConext->type == MEMORY_USING_MSG) {
968 auto mmapEvent = hookData->mutable_mmap_event();
969 std::string tagName;
970 GetMemTag(rawStack->stackConext->tagId, tagName);
971 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId, tagName);
972 } else if (rawStack->stackConext->type == MEMORY_UNUSING_MSG) {
973 auto munmapEvent = hookData->mutable_munmap_event();
974 SetEventFrame(rawStack, callFrames, munmapEvent, stackMapId);
975 }
976 }
977
SetFreeStatisticsData(uint64_t addr)978 inline bool StackPreprocess::SetFreeStatisticsData(uint64_t addr)
979 {
980 // through the addr lookup record
981 auto addrIter = allocAddrMap_.find(addr);
982 if (addrIter != allocAddrMap_.end()) {
983 auto& record = addrIter->second.second;
984 ++record->releaseCount;
985 record->releaseSize += addrIter->second.first;
986 statisticsPeriodData_[record->callstackId] = record;
987 allocAddrMap_.erase(addr);
988 return true;
989 }
990 return false;
991 }
992
SetAllocStatisticsData(const RawStackPtr & rawStack,size_t stackId,bool isExists)993 inline void StackPreprocess::SetAllocStatisticsData(const RawStackPtr& rawStack, size_t stackId, bool isExists)
994 {
995 // if the record exists, it is updated.Otherwise Add
996 if (isExists) {
997 auto recordIter = recordStatisticsMap_.find(stackId);
998 if (recordIter != recordStatisticsMap_.end()) {
999 auto& record = recordIter->second;
1000 ++record.applyCount;
1001 record.applySize += rawStack->stackConext->mallocSize;
1002 allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
1003 std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
1004 statisticsPeriodData_[stackId] = &recordIter->second;
1005 }
1006 } else {
1007 RecordStatistic record;
1008 record.pid = rawStack->stackConext->pid;
1009 record.callstackId = stackId;
1010 record.applyCount = 1;
1011 record.applySize = rawStack->stackConext->mallocSize;
1012 switch (rawStack->stackConext->type) {
1013 case MALLOC_MSG: {
1014 record.type = RecordStatisticsEvent::MALLOC;
1015 break;
1016 }
1017 case MMAP_MSG: {
1018 record.type = RecordStatisticsEvent::MMAP;
1019 break;
1020 }
1021 case MMAP_FILE_PAGE_MSG: {
1022 record.type = RecordStatisticsEvent::FILE_PAGE_MSG;
1023 break;
1024 }
1025 case MEMORY_USING_MSG: {
1026 record.type = RecordStatisticsEvent::MEMORY_USING_MSG;
1027 record.tagId = rawStack->stackConext->tagId;
1028 break;
1029 }
1030 default: {
1031 PROFILER_LOG_ERROR(LOG_CORE, "SetAllocStatisticsData event type error");
1032 break;
1033 }
1034 }
1035
1036 auto [recordIter, stat] = recordStatisticsMap_.emplace(stackId, record);
1037 allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
1038 std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
1039 statisticsPeriodData_[stackId] = &recordIter->second;
1040 }
1041 }
1042
WriteFrames(RawStackPtr rawStack,const std::vector<CallFrame> & callFrames)1043 void StackPreprocess::WriteFrames(RawStackPtr rawStack, const std::vector<CallFrame>& callFrames)
1044 {
1045 CHECK_TRUE(fpHookData_ != nullptr, NO_RETVAL, "fpHookData_ is nullptr, please check file_name(%s)",
1046 hookConfig_.file_name().c_str());
1047 if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
1048 const std::string prefix = "Anonymous:";
1049 std::string tagName(reinterpret_cast<char*>(rawStack->data));
1050 fprintf(fpHookData_, "prctl;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ":tag:%s\n",
1051 rawStack->stackConext->pid, rawStack->stackConext->tid,
1052 (int64_t)rawStack->stackConext->ts.tv_sec, rawStack->stackConext->ts.tv_nsec,
1053 (uint64_t)rawStack->stackConext->addr, (prefix + tagName).c_str());
1054 return;
1055 }
1056 std::string tag = "";
1057 switch (rawStack->stackConext->type) {
1058 case FREE_MSG:
1059 tag = "free";
1060 break;
1061 case MALLOC_MSG:
1062 tag = "malloc";
1063 break;
1064 case MMAP_MSG:
1065 tag = "mmap";
1066 break;
1067 case MUNMAP_MSG:
1068 tag = "munmap";
1069 break;
1070 default:
1071 break;
1072 }
1073
1074 fprintf(fpHookData_, "%s;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ";%zu\n", tag.c_str(),
1075 rawStack->stackConext->pid, rawStack->stackConext->tid, (int64_t)rawStack->stackConext->ts.tv_sec,
1076 rawStack->stackConext->ts.tv_nsec, (uint64_t)rawStack->stackConext->addr, rawStack->stackConext->mallocSize);
1077 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
1078 for (; idx < callFrames.size(); ++idx) {
1079 (void)fprintf(fpHookData_, "0x%" PRIx64 ";0x%" PRIx64 ";%s;%s;0x%" PRIx64 ";%" PRIu64 "\n",
1080 callFrames[idx].ip_, callFrames[idx].sp_, std::string(callFrames[idx].symbolName_).c_str(),
1081 std::string(callFrames[idx].filePath_).c_str(), callFrames[idx].offset_, callFrames[idx].symbolOffset_);
1082 }
1083 }
1084
1085 template <typename T>
SetFrameInfo(T & frame,CallFrame & callFrame)1086 inline void StackPreprocess::SetFrameInfo(T& frame, CallFrame& callFrame)
1087 {
1088 frame.set_ip(callFrame.ip_);
1089 if (hookConfig_.offline_symbolization()) {
1090 // when js mixes offline symbols, the js call stack is reported according to the online symbolization
1091 if (callFrame.isJsFrame_ && callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0) {
1092 frame.set_sp(callFrame.sp_);
1093 frame.set_offset(callFrame.offset_);
1094 frame.set_symbol_offset(callFrame.symbolOffset_);
1095 frame.set_symbol_name_id(callFrame.symbolNameId_);
1096 frame.set_file_path_id(callFrame.filePathId_);
1097 }
1098 return;
1099 }
1100 frame.set_sp(callFrame.sp_);
1101 if (!(callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0)) {
1102 frame.set_symbol_name(std::string(callFrame.symbolName_));
1103 frame.set_file_path(std::string(callFrame.filePath_));
1104 }
1105 frame.set_offset(callFrame.offset_);
1106 frame.set_symbol_offset(callFrame.symbolOffset_);
1107 if (callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0) {
1108 frame.set_symbol_name_id(callFrame.symbolNameId_);
1109 frame.set_file_path_id(callFrame.filePathId_);
1110 }
1111 }
1112
1113 template <typename T>
ReportSymbolNameMap(CallFrame & callFrame,T & stackData)1114 inline void StackPreprocess::ReportSymbolNameMap(CallFrame& callFrame, T& stackData)
1115 {
1116 if (callFrame.needReport_ & SYMBOL_NAME_ID_REPORT) {
1117 auto hookData = stackData.add_events();
1118 auto symbolMap = hookData->mutable_symbol_name();
1119 symbolMap->set_id(callFrame.symbolNameId_);
1120 symbolMap->set_name(std::string(callFrame.symbolName_));
1121 symbolMap->set_pid(pid_);
1122 }
1123 }
1124
1125 template <typename T>
ReportFilePathMap(CallFrame & callFrame,T & stackData)1126 inline void StackPreprocess::ReportFilePathMap(CallFrame& callFrame, T& stackData)
1127 {
1128 if (callFrame.needReport_ & FILE_PATH_ID_REPORT) {
1129 auto hookData = stackData.add_events();
1130 auto filePathMap = hookData->mutable_file_path();
1131 filePathMap->set_id(callFrame.filePathId_);
1132 filePathMap->set_name(std::string(callFrame.filePath_));
1133 filePathMap->set_pid(pid_);
1134 }
1135 }
1136
1137 template <typename T>
ReportFrameMap(CallFrame & callFrame,T & stackData)1138 inline void StackPreprocess::ReportFrameMap(CallFrame& callFrame, T& stackData)
1139 {
1140 if (callFrame.needReport_ & CALL_FRAME_REPORT) {
1141 if ((!hookConfig_.fp_unwind()) && callFrame.callFrameId_ == DWARF_ERROR_ID && !unwindFailReport_) {
1142 return;
1143 } else if ((!hookConfig_.fp_unwind()) && callFrame.callFrameId_ == DWARF_ERROR_ID && unwindFailReport_) {
1144 unwindFailReport_ = false;
1145 }
1146 ReportSymbolNameMap(callFrame, stackData);
1147 ReportFilePathMap(callFrame, stackData);
1148 auto hookData = stackData.add_events();
1149 auto frameMap = hookData->mutable_frame_map();
1150 frameMap->set_id(callFrame.callFrameId_);
1151 auto frame = frameMap->mutable_frame();
1152 SetFrameInfo(*frame, callFrame);
1153 frameMap->set_pid(pid_);
1154 }
1155 }
1156
SetMapsInfo()1157 void StackPreprocess::SetMapsInfo()
1158 {
1159 std::lock_guard<std::mutex> guard(mtx_);
1160 for (auto& itemSoBegin : runtime_instance->GetOfflineMaps()) {
1161 auto& maps = runtime_instance->GetMapsCache();
1162 auto mapsIter = maps.find(itemSoBegin);
1163 if (mapsIter == maps.end()) {
1164 continue;
1165 }
1166
1167 ElfSymbolTable symbolInfo;
1168 auto& curMemMaps = mapsIter->second;
1169 GetSymbols(curMemMaps->name_, symbolInfo);
1170 if (symbolInfo.symEntSize == 0) {
1171 continue;
1172 }
1173 std::visit([&](auto& stackData) {
1174 auto hookData = stackData.add_events();
1175 auto filepathMap = hookData->mutable_file_path();
1176 filepathMap->set_id(curMemMaps->filePathId_);
1177 filepathMap->set_name(curMemMaps->name_);
1178 filepathMap->set_pid(pid_);
1179 SetSymbolInfo(curMemMaps->filePathId_, symbolInfo, stackData);
1180
1181 for (auto& map : curMemMaps->GetMaps()) {
1182 if (map->prots & PROT_EXEC) {
1183 auto nativeHookData = stackData.add_events();
1184 auto mapSerialize = nativeHookData->mutable_maps_info();
1185 mapSerialize->set_pid(pid_);
1186 mapSerialize->set_start(map->begin);
1187 mapSerialize->set_end(map->end);
1188 mapSerialize->set_offset(map->offset);
1189 mapSerialize->set_file_path_id(curMemMaps->filePathId_);
1190 }
1191 }
1192 FlushData(stackData);
1193 }, stackData_);
1194 }
1195 runtime_instance->ClearOfflineMaps();
1196 }
1197
// Serializes one module's ELF symbol table into the hook data stream so the
// host side can perform offline symbolization.
// @param filePathId          id previously assigned to the module's file path
// @param symbolInfo          raw symbol/string table blobs from GetSymbols()
// @param batchNativeHookData destination batch
template <typename T>
void StackPreprocess::SetSymbolInfo(uint32_t filePathId, ElfSymbolTable& symbolInfo, T& batchNativeHookData)
{
    if (symbolInfo.symEntSize == 0) {
        PROFILER_LOG_ERROR(LOG_CORE, "SetSymbolInfo get symbolInfo failed");
        return;
    }
    auto hookData = batchNativeHookData.add_events();
    auto symTable = hookData->mutable_symbol_tab();
    symTable->set_file_path_id(filePathId);
    symTable->set_text_exec_vaddr(symbolInfo.textVaddr);
    symTable->set_text_exec_vaddr_file_offset(symbolInfo.textOffset);
    symTable->set_sym_entry_size(symbolInfo.symEntSize);
    symTable->set_sym_table(symbolInfo.symTable.data(), symbolInfo.symTable.size());
    symTable->set_str_table(symbolInfo.strTable.data(), symbolInfo.strTable.size());
    symTable->set_pid(pid_);
}
1215
1216 template <typename T>
FlushCheck(T & stackData)1217 void StackPreprocess::FlushCheck(T& stackData)
1218 {
1219 if (hookConfig_.statistics_interval() > 0) {
1220 if (!statisticsModelFlushCallstack_) {
1221 return;
1222 }
1223 if constexpr (std::is_same<T, ::BatchNativeHookData>::value) {
1224 FlushData(stackData);
1225 } else {
1226 uint64_t dataLen = static_cast<uint64_t>(stackData.Size());
1227 if (dataLen > flushSize_) {
1228 FlushData(stackData);
1229 }
1230 }
1231 statisticsModelFlushCallstack_ = false;
1232 } else {
1233 FlushData(stackData);
1234 }
1235 }
1236
FlushData(BatchNativeHookData & stackData)1237 void StackPreprocess::FlushData(BatchNativeHookData& stackData)
1238 {
1239 if (buffer_ == nullptr) {
1240 return;
1241 }
1242 if (stackData.events().size() > 0) {
1243 size_t length = stackData.ByteSizeLong();
1244 stackData.SerializeToArray(buffer_.get(), length);
1245 if (length < bufferSize_) {
1246 if (isHookStandaloneSerialize_) {
1247 std::string str;
1248 ForStandard::BatchNativeHookData StandardStackData;
1249 StandardStackData.ParseFromArray(buffer_.get(), length);
1250 google::protobuf::TextFormat::PrintToString(StandardStackData, &str);
1251 size_t n = fwrite(str.data(), 1, str.size(), fpHookData_);
1252 fflush(fpHookData_);
1253 std::get<::BatchNativeHookData>(stackData_).clear_events();
1254 PROFILER_LOG_DEBUG(LOG_CORE, "Flush Data fwrite n = %zu str.size() = %zu", n, str.size());
1255 } else {
1256 Flush(buffer_.get(), length);
1257 }
1258 } else {
1259 PROFILER_LOG_ERROR(LOG_CORE, "the data is larger than MAX_BUFFER_SIZE, flush failed");
1260 }
1261 }
1262 }
1263
FlushData(ProtoEncoder::BatchNativeHookData & stackData)1264 void StackPreprocess::FlushData(ProtoEncoder::BatchNativeHookData& stackData)
1265 {
1266 if (stackData.Size() == 0) {
1267 return;
1268 }
1269
1270 int messageLen = stackData.Finish();
1271 RandomWriteCtx* ctx = nullptr;
1272 if (!isSaService_) {
1273 resultWriter_->finishReport(resultWriter_, messageLen);
1274 resultWriter_->flush(resultWriter_);
1275 ctx = resultWriter_->startReport(resultWriter_);
1276 } else {
1277 profilerPluginData_.finishAdd_data(messageLen);
1278 FinishReport();
1279 ctx = StartReport();
1280 }
1281
1282 if (ctx == nullptr) {
1283 PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
1284 return;
1285 }
1286 stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
1287 }
1288
Flush(const uint8_t * src,size_t size)1289 void StackPreprocess::Flush(const uint8_t* src, size_t size)
1290 {
1291 if (src == nullptr) {
1292 PROFILER_LOG_ERROR(LOG_CORE, "Flush src is nullptr");
1293 return;
1294 }
1295
1296 if (writer_ == nullptr) {
1297 PROFILER_LOG_ERROR(LOG_CORE, "Flush writer_ is nullptr");
1298 return;
1299 }
1300 writer_->Write(src, size);
1301 writer_->Flush();
1302
1303 std::get<::BatchNativeHookData>(stackData_).clear_events();
1304 }
1305
GetSymbols(const std::string & filePath,ElfSymbolTable & symbols)1306 void StackPreprocess::GetSymbols(const std::string& filePath, ElfSymbolTable& symbols)
1307 {
1308 std::shared_ptr<DfxElf> elfPtr = std::make_shared<DfxElf>(filePath);
1309
1310 symbols.textVaddr = elfPtr->GetStartVaddr();
1311 symbols.textOffset = elfPtr->GetStartOffset();
1312 if (symbols.textVaddr == (std::numeric_limits<uint64_t>::max)()) {
1313 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get textVaddr failed");
1314 return;
1315 }
1316
1317 std::string symSecName;
1318 std::string strSecName;
1319 ShdrInfo shdr;
1320 if (elfPtr->GetSectionInfo(shdr, ".symtab")) {
1321 symSecName = ".symtab";
1322 strSecName = ".strtab";
1323 } else if (elfPtr->GetSectionInfo(shdr, ".dynsym")) {
1324 symSecName = ".dynsym";
1325 strSecName = ".dynstr";
1326 } else {
1327 return;
1328 }
1329 symbols.symEntSize = shdr.entSize;
1330 symbols.symTable.resize(shdr.size);
1331 if (!elfPtr->GetSectionData(symbols.symTable.data(), shdr.size, symSecName)) {
1332 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get symbol section data failed");
1333 return;
1334 }
1335 if (!elfPtr->GetSectionInfo(shdr, strSecName)) {
1336 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
1337 return;
1338 }
1339 symbols.strTable.resize(shdr.size);
1340 if (!elfPtr->GetSectionData(symbols.strTable.data(), shdr.size, strSecName)) {
1341 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
1342 return;
1343 }
1344 }
1345
FlushRecordStatistics()1346 bool StackPreprocess::FlushRecordStatistics()
1347 {
1348 if (statisticsPeriodData_.empty()) {
1349 return false;
1350 }
1351 std::visit([&](auto& stackData) {
1352 FlushData(stackData);
1353 }, stackData_);
1354 std::visit([&](auto& stackData) {
1355 struct timespec ts;
1356 clock_gettime(hookDataClockId_, &ts);
1357 for (auto [addr, statistics] : statisticsPeriodData_) {
1358 auto hookData = stackData.add_events();
1359 hookData->set_tv_sec(ts.tv_sec);
1360 hookData->set_tv_nsec(ts.tv_nsec);
1361 auto recordEvent = hookData->mutable_statistics_event();
1362 recordEvent->set_pid(statistics->pid);
1363 recordEvent->set_callstack_id(statistics->callstackId);
1364 recordEvent->set_type(statistics->type);
1365 recordEvent->set_apply_count(statistics->applyCount);
1366 recordEvent->set_release_count(statistics->releaseCount);
1367 recordEvent->set_apply_size(statistics->applySize);
1368 recordEvent->set_release_size(statistics->releaseSize);
1369 }
1370 FlushData(stackData);
1371 }, stackData_);
1372 statisticsPeriodData_.clear();
1373 return true;
1374 }
1375
SaveMemTag(uint32_t tagId,const std::string & tagName)1376 void StackPreprocess::SaveMemTag(uint32_t tagId, const std::string& tagName)
1377 {
1378 std::string temp;
1379 bool res = memTagMap_.Find(tagId, temp);
1380 if (!res) {
1381 memTagMap_.EnsureInsert(tagId, tagName);
1382 }
1383 }
1384
GetMemTag(uint32_t tagId,std::string & tagName)1385 bool StackPreprocess::GetMemTag(uint32_t tagId, std::string& tagName)
1386 {
1387 return memTagMap_.Find(tagId, tagName);
1388 }
1389
SaveJsRawStack(uint64_t jsChainId,const char * jsRawStack)1390 void StackPreprocess::SaveJsRawStack(uint64_t jsChainId, const char* jsRawStack)
1391 {
1392 auto iterChainId = jsStackMap_.find(jsChainId);
1393 if (iterChainId == jsStackMap_.end()) {
1394 auto iterRawStack = jsStackSet_.find(jsRawStack);
1395 if (iterRawStack == jsStackSet_.end()) {
1396 auto iter = jsStackSet_.insert(jsRawStack);
1397 jsStackMap_[jsChainId] = iter.first->c_str();
1398 } else {
1399 jsStackMap_[jsChainId] = iterRawStack->c_str();
1400 }
1401 }
1402 }
1403
GetJsRawStack(uint64_t jsChainId)1404 const char* StackPreprocess::GetJsRawStack(uint64_t jsChainId)
1405 {
1406 auto iter = jsStackMap_.find(jsChainId);
1407 if (iter != jsStackMap_.end()) {
1408 return iter->second;
1409 }
1410 return nullptr;
1411 }
1412
LgFloor(unsigned long val)1413 unsigned StackPreprocess::LgFloor(unsigned long val)
1414 {
1415 val |= (val >> RIGHT_MOVE_1);
1416 val |= (val >> RIGHT_MOVE_2);
1417 val |= (val >> RIGHT_MOVE_4);
1418 val |= (val >> RIGHT_MOVE_8);
1419 val |= (val >> RIGHT_MOVE_16);
1420 if (sizeof(val) > 4) { // 4: sizeThreshold
1421 int constant = sizeof(val) * 4; // 4: sizeThreshold
1422 val |= (val >> constant);
1423 }
1424 val++;
1425 if (val == 0) {
1426 return 8 * sizeof(val) - 1; // 8: 8byte
1427 }
1428 return __builtin_ffsl(val) - 2; // 2: adjustment
1429 }
1430
PowCeil(uint64_t val)1431 uint64_t StackPreprocess::PowCeil(uint64_t val)
1432 {
1433 size_t msbIndex = LgFloor(val - 1);
1434 return 1ULL << (msbIndex + 1);
1435 }
1436
ComputeAlign(size_t size)1437 size_t StackPreprocess::ComputeAlign(size_t size)
1438 {
1439 if (size == 0) {
1440 return 0;
1441 }
1442 unsigned index = 0;
1443 if (size <= (size_t(1) << SC_LG_TINY_MAXCLASS)) {
1444 unsigned lgTmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
1445 unsigned lgCeil = LgFloor(PowCeil(size));
1446 index = (lgCeil < lgTmin) ? 0 : lgCeil - lgTmin;
1447 } else {
1448 unsigned floor = LgFloor((size << 1) - 1);
1449 unsigned shift = (floor < SC_LG_NGROUP + LG_QUANTUM) ? 0 : floor - (SC_LG_NGROUP + LG_QUANTUM);
1450 unsigned grp = shift << SC_LG_NGROUP;
1451 unsigned lgDelta = (floor < SC_LG_NGROUP + LG_QUANTUM + 1) ? LG_QUANTUM : floor - SC_LG_NGROUP - 1;
1452 size_t deltaInverseMask = size_t(-1) << lgDelta;
1453 unsigned mod = ((((size - 1) & deltaInverseMask) >> lgDelta)) & ((size_t(1) << SC_LG_NGROUP) - 1);
1454 index = SC_NTINY + grp + mod;
1455 }
1456
1457 if (index < NTBINS) {
1458 return (size_t(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
1459 }
1460 size_t reducedIndex = index - NTBINS;
1461 size_t grpVal = reducedIndex >> LG_SIZE_CLASS_GROUP;
1462 size_t modVal = reducedIndex & ((size_t(1) << LG_SIZE_CLASS_GROUP) - 1);
1463 size_t grpSizeMask = ~((!!grpVal) - 1);
1464 size_t grpSize = ((size_t(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP - 1))) << grpVal) & grpSizeMask;
1465 size_t shiftVal = (grpVal == 0) ? 1 : grpVal;
1466 size_t lgDeltaVal = shiftVal + (LG_QUANTUM - 1);
1467 size_t modSize = (modVal + 1) << lgDeltaVal;
1468 size_t usize = grpSize + modSize;
1469 return usize;
1470 }
1471
WriteHookConfig()1472 void StackPreprocess::WriteHookConfig()
1473 {
1474 const size_t configSize = hookConfig_.ByteSizeLong();
1475 auto buffer = std::make_unique<uint8_t[]>(configSize);
1476 hookConfig_.SerializeToArray(buffer.get(), configSize);
1477
1478 writer_->ResetPos();
1479 profilerPluginData_.Reset(writer_->GetCtx());
1480 profilerPluginData_.set_name("nativehook_config");
1481 profilerPluginData_.set_version("1.02");
1482 profilerPluginData_.set_status(0);
1483 profilerPluginData_.set_data(buffer.get(), configSize);
1484
1485 FinishReport();
1486
1487 auto ctx = StartReport();
1488 if (ctx == nullptr) {
1489 PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
1490 return;
1491 }
1492 stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
1493 }
1494
StartReport()1495 RandomWriteCtx* StackPreprocess::StartReport()
1496 {
1497 writer_->ResetPos();
1498 profilerPluginData_.Reset(writer_->GetCtx());
1499 profilerPluginData_.set_name("nativehook");
1500 profilerPluginData_.set_version("1.02");
1501 profilerPluginData_.set_status(0);
1502 return profilerPluginData_.startAdd_data();
1503 }
1504
FinishReport()1505 void StackPreprocess::FinishReport()
1506 {
1507 struct timespec ts;
1508 clock_gettime(pluginDataClockId_, &ts);
1509 profilerPluginData_.set_clock_id(static_cast<ProfilerPluginData_ClockId>(pluginDataClockId_));
1510 profilerPluginData_.set_tv_sec(ts.tv_sec);
1511 profilerPluginData_.set_tv_nsec(ts.tv_nsec);
1512
1513 int32_t len = profilerPluginData_.Finish();
1514 if (writer_ == nullptr) {
1515 PROFILER_LOG_ERROR(LOG_CORE, "%s: the writer is nullptr!", __func__);
1516 return;
1517 }
1518 writer_->FinishReport(len);
1519 }
1520
FillFpNativeIp(RawStackPtr & rawData)1521 void StackPreprocess::FillFpNativeIp(RawStackPtr& rawData)
1522 {
1523 #if defined(__aarch64__)
1524 uintptr_t pacMask = 0xFFFFFF8000000000;
1525 #else
1526 uintptr_t pacMask = 0;
1527 #endif
1528 uint64_t* fpIp = reinterpret_cast<uint64_t *>(rawData->data);
1529 for (uint8_t idx = 0; idx < rawData->fpDepth ; ++idx) {
1530 if (fpIp[idx] == 0) {
1531 break;
1532 }
1533 callFrames_.emplace_back(fpIp[idx] & (~pacMask));
1534 }
1535 }
1536
FillFpJsData(RawStackPtr & rawData)1537 void StackPreprocess::FillFpJsData(RawStackPtr& rawData)
1538 {
1539 if (hookConfig_.statistics_interval() > 0) {
1540 switch (rawData->stackConext->type) {
1541 case FREE_MSG:
1542 case MUNMAP_MSG:
1543 case MEMORY_UNUSING_MSG:
1544 return;
1545 default:
1546 break;
1547 }
1548 }
1549 fpJsCallStacks_.clear();
1550 /**
1551 * jsStackData:
1552 * ts_malloc1|entry/src/main/ets/pages/Index.ets:5:5,ts_malloc2|entry/src/main/ets/pages/Index.ets:8:5
1553 * | |
1554 * JS_SYMBOL_FILEPATH_SEP JS_CALL_STACK_DEPTH_SEP
1555 * jsCallStack:
1556 * ts_malloc1|entry/src/main/ets/pages/Index.ets:5:5
1557 * / \
1558 * | |
1559 * jsSymbolFilePathSepPos |
1560 * jsFilePathPos = jsSymbolFilePathSepPos + 1
1561 */
1562 AdvancedSplitString(rawData->jsStackData, JS_CALL_STACK_DEPTH_SEP, fpJsCallStacks_);
1563 for (std::string& jsCallStack: fpJsCallStacks_) {
1564 std::string::size_type jsSymbolFilePathSepPos = jsCallStack.find_first_of(JS_SYMBOL_FILEPATH_SEP);
1565 if (jsSymbolFilePathSepPos == std::string::npos) {
1566 PROFILER_LOG_ERROR(LOG_CORE, "%s: jsCallStack find FAILED!", __func__);
1567 continue;
1568 }
1569 std::string::size_type jsFilePathPos = jsSymbolFilePathSepPos + 1;
1570 jsCallStack[jsSymbolFilePathSepPos] = '\0'; // "ts_malloc1'\0'entry/src/main/ets/pages/Index.ets:5:5"
1571 CallFrame& jsCallFrame = callFrames_.emplace_back(0, 0, true);
1572 jsCallFrame.symbolName_ = StringViewMemoryHold::GetInstance().HoldStringView(jsCallStack.c_str());
1573 jsCallFrame.filePath_ = StringViewMemoryHold::GetInstance().HoldStringView(jsCallStack.c_str() + jsFilePathPos);
1574 if (hookConfig_.offline_symbolization()) {
1575 DfxSymbol symbol;
1576 if (!runtime_instance->ArktsGetSymbolCache(jsCallFrame, symbol)) {
1577 symbol.filePathId_ = runtime_instance->FillArkTsFilePath(jsCallFrame.filePath_);
1578 symbol.symbolName_ = jsCallFrame.symbolName_;
1579 symbol.module_ = jsCallFrame.filePath_;
1580 symbol.symbolId_ = runtime_instance->GetJsSymbolCacheSize();
1581 runtime_instance->FillSymbolNameId(jsCallFrame, symbol);
1582 runtime_instance->FillFileSet(jsCallFrame, symbol);
1583 jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
1584 runtime_instance->FillJsSymbolCache(jsCallFrame, symbol);
1585 }
1586 jsCallFrame.callFrameId_ = symbol.symbolId_;
1587 jsCallFrame.symbolNameId_ = symbol.symbolNameId_;
1588 jsCallFrame.filePathId_ = symbol.filePathId_;
1589 jsCallFrame.filePath_ = symbol.module_;
1590 jsCallFrame.symbolName_ = symbol.symbolName_;
1591 }
1592 }
1593 }
1594
FillDwarfErrorStack()1595 void StackPreprocess::FillDwarfErrorStack()
1596 {
1597 #if defined(__aarch64__)
1598 uintptr_t pacMask = 0xFFFFFF8000000000;
1599 #else
1600 uintptr_t pacMask = 0;
1601 #endif
1602 CallFrame& jsCallFrame = callFrames_.emplace_back(0 & (~pacMask));
1603 jsCallFrame.symbolName_ = "UnwindErrorDwarf";
1604 jsCallFrame.isJsFrame_ = true;
1605 jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
1606 jsCallFrame.needReport_ |= SYMBOL_NAME_ID_REPORT;
1607 jsCallFrame.needReport_ |= FILE_PATH_ID_REPORT;
1608 jsCallFrame.callFrameId_ = DWARF_ERROR_ID;
1609 jsCallFrame.symbolNameId_ = DWARF_ERROR_ID;
1610 jsCallFrame.filePathId_ = DWARF_ERROR_ID;
1611 jsCallFrame.filePath_ = "no-file-path";
1612 }
1613
FlushRecordApplyAndReleaseMatchData()1614 void StackPreprocess::FlushRecordApplyAndReleaseMatchData()
1615 {
1616 if (applyAndReleaseMatchPeriodListData_.empty()) {
1617 return;
1618 }
1619 std::visit([&](auto& stackData) {
1620 for (const auto& rawStack: applyAndReleaseMatchPeriodListData_) {
1621 auto hookData = stackData.add_events();
1622 hookData->set_tv_sec(rawStack.ts.tv_sec);
1623 hookData->set_tv_nsec(rawStack.ts.tv_nsec);
1624 if (rawStack.type == MALLOC_MSG) {
1625 auto allocEvent = hookData->mutable_alloc_event();
1626 SetEventFrame(rawStack, allocEvent, rawStack.stackMapId);
1627 } else if (rawStack.type == FREE_MSG) {
1628 auto freeEvent = hookData->mutable_free_event();
1629 SetEventFrame(rawStack, freeEvent, rawStack.stackMapId);
1630 } else if (rawStack.type == MMAP_MSG) {
1631 auto mmapEvent = hookData->mutable_mmap_event();
1632 SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId);
1633 } else if (rawStack.type == MMAP_FILE_PAGE_MSG) {
1634 auto mmapEvent = hookData->mutable_mmap_event();
1635 const std::string prefix = "FilePage:";
1636 std::string tagName;
1637 if (GetMemTag(rawStack.tagId, tagName)) {
1638 tagName = prefix + tagName;
1639 }
1640 SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId, tagName);
1641 } else if (rawStack.type == MUNMAP_MSG) {
1642 auto munmapEvent = hookData->mutable_munmap_event();
1643 SetEventFrame(rawStack, munmapEvent, rawStack.stackMapId);
1644 } else if (rawStack.type == PR_SET_VMA_MSG) {
1645 auto tagEvent = hookData->mutable_tag_event();
1646 const std::string prefix = "Anonymous:";
1647 tagEvent->set_addr(rawStack.addr);
1648 tagEvent->set_size(rawStack.mallocSize);
1649 tagEvent->set_tag(prefix + prctlPeriodTags_[rawStack.tagId]);
1650 tagEvent->set_pid(pid_);
1651 } else if (rawStack.type == MEMORY_USING_MSG) {
1652 auto mmapEvent = hookData->mutable_mmap_event();
1653 std::string tagName;
1654 GetMemTag(rawStack.tagId, tagName);
1655 SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId, tagName);
1656 } else if (rawStack.type == MEMORY_UNUSING_MSG) {
1657 auto munmapEvent = hookData->mutable_munmap_event();
1658 SetEventFrame(rawStack, munmapEvent, rawStack.stackMapId);
1659 }
1660 }
1661 FlushData(stackData);
1662 }, stackData_);
1663 applyAndReleaseMatchPeriodListData_.clear();
1664 applyAndReleaseMatchIntervallMap_.clear();
1665 prctlPeriodTags_.clear();
1666 }