1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "stack_preprocess.h"
17
18 #include <elf.h>
19 #include <unistd.h>
20
21 #include "common.h"
22 #include "logging.h"
23 #include "plugin_service_types.pb.h"
24 #include "dfx_elf.h"
25 #include "utilities.h"
26 #include "native_hook_result_standard.pb.h"
27 #include "native_hook_config_standard.pb.h"
28 #include "google/protobuf/text_format.h"
29 #include "trace_file_writer.h"
30
31
32 constexpr static uint32_t SC_LG_TINY_MIN = 3;
33 constexpr static uint32_t LG_QUANTUM = 4;
34 constexpr static uint32_t SC_NTINY = LG_QUANTUM - SC_LG_TINY_MIN;
35 constexpr static uint32_t SC_LG_TINY_MAXCLASS = (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1);
36 constexpr static uint32_t SC_LG_NGROUP = 2;
37 constexpr static uint32_t LG_SIZE_CLASS_GROUP = 2;
38 constexpr static uint32_t NTBINS = 1;
39 constexpr static uint32_t LG_TINY_MAXCLASS = 3;
40 constexpr static uint32_t MAX_MATCH_CNT = 1000;
41 constexpr static uint32_t MAX_MATCH_INTERVAL = 3600;
42 constexpr static uint32_t LOG_PRINT_TIMES = 10000;
43 constexpr static uint32_t WAIT_STOP_TIME = 5000;
44 constexpr static uint32_t WAIT_TIME_ONCE = 10;
45 constexpr static uint32_t MAX_BATCH_CNT = 40;
46 constexpr static uint32_t RIGHT_MOVE_1 = 1;
47 constexpr static uint32_t RIGHT_MOVE_2 = 2;
48 constexpr static uint32_t RIGHT_MOVE_4 = 4;
49 constexpr static uint32_t RIGHT_MOVE_8 = 8;
50 constexpr static uint32_t RIGHT_MOVE_16 = 16;
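// SIZE_MASK tags the allocation-size entry placed at the front of a statistics call-stack key;
// JS_OFFLINE_IP_MASK marks JS frame ids so they can be distinguished from native ips in offline-symbolization stacks.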
51 constexpr static uint64_t SIZE_MASK = 0xFFFFFF0000000000;
52 constexpr static uint64_t JS_OFFLINE_IP_MASK = 0xFFFFFE0000000000;
53 constexpr static uint64_t DWARF_ERROR_ID = 999999;
54 constexpr static uint64_t DWARF_NAPI_CALLBACK = 999999;
55 static std::string JS_CALL_STACK_DEPTH_SEP = ","; // ',' is js call stack depth separator
56 static std::string JS_SYMBOL_FILEPATH_SEP = "|"; // '|' is js symbol and filepath separator
57 constexpr static int NAPI_CALL_STACK = 2; // just for napi call stack
58 constexpr static uint32_t FRAME_DEPTH = 2; // add two frames
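// Once this many bytes (1 MiB) have been flushed in SA mode, the trace file header is refreshed.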
59 constexpr uint32_t FLUSH_BASELINE_SA = (1U << 20);
60 #ifdef PERFORMANCE_DEBUG
61 constexpr static uint32_t LONG_TIME_THRESHOLD = 1000000;
62 static std::atomic<uint64_t> timeCost = 0;
63 static std::atomic<uint64_t> unwindTimes = 0;
64 #endif
65
66 using namespace OHOS::Developtools::NativeDaemon;
67 using namespace OHOS::HiviewDFX;
68 using namespace OHOS::Developtools::Profiler;
69
70 StackPreprocess::StackPreprocess(const StackDataRepeaterPtr& dataRepeater, const NativeHookConfig& hookConfig,
71 clockid_t pluginDataClockId, FILE* fpHookData, bool isHookStandalone, bool isSaService, bool isProtobufSerialize)
72 : dataRepeater_(dataRepeater), hookConfig_(hookConfig), pluginDataClockId_(pluginDataClockId),
73 fpHookData_(fpHookData), isHookStandaloneSerialize_(isHookStandalone), isSaService_(isSaService),
74 isProtobufSerialize_(isProtobufSerialize)
75 {
76 runtime_instance = std::make_shared<VirtualRuntime>(hookConfig_);
77 if (hookConfig_.malloc_free_matching_interval() > MAX_MATCH_INTERVAL) {
78         PROFILER_LOG_INFO(LOG_CORE, "Unsupported malloc_free_matching_interval %d, clamping to the maximum",
            hookConfig_.malloc_free_matching_interval());
79 hookConfig_.set_malloc_free_matching_interval(MAX_MATCH_INTERVAL);
80 }
81
82 if (hookConfig_.malloc_free_matching_cnt() > MAX_MATCH_CNT) {
83         PROFILER_LOG_INFO(LOG_CORE, "Unsupported malloc_free_matching_cnt %d, clamping to the maximum",
            hookConfig_.malloc_free_matching_cnt());
84 hookConfig_.set_malloc_free_matching_cnt(MAX_MATCH_CNT);
85 }
86 PROFILER_LOG_INFO(LOG_CORE, "malloc_free_matching_interval = %d malloc_free_matching_cnt = %d\n",
87 hookConfig_.malloc_free_matching_interval(), hookConfig_.malloc_free_matching_cnt());
88
89 if (hookConfig_.statistics_interval() > 0) {
90 statisticsInterval_ = std::chrono::seconds(hookConfig_.statistics_interval());
91 recordStatisticsMap_.reserve(STATISTICS_MAP_SZIE);
92 statisticsPeriodData_.reserve(STATISTICS_PERIOD_DATA_SIZE);
93 allocAddrMap_.reserve(ALLOC_ADDRMAMP_SIZE);
94 }
95 if (hookConfig_.malloc_free_matching_interval() > 0) {
96 applyAndReleaseMatchInterval_ = std::chrono::seconds(hookConfig_.malloc_free_matching_interval());
97 applyAndReleaseMatchIntervallMap_.reserve(MATCH_ADDRMAMP_SIZE);
98 }
99 PROFILER_LOG_INFO(LOG_CORE, "statistics_interval = %d statisticsInterval_ = %lld \n",
100 hookConfig_.statistics_interval(), statisticsInterval_.count());
101 PROFILER_LOG_INFO(LOG_CORE, "applyAndReleaseMatchInterval_ = %lld", applyAndReleaseMatchInterval_.count());
102 hookDataClockId_ = COMMON::GetClockId(hookConfig_.clock());
103 PROFILER_LOG_INFO(LOG_CORE, "StackPreprocess(): pluginDataClockId = %d hookDataClockId = %d \n",
104 pluginDataClockId_, hookDataClockId_);
105 if (hookConfig_.save_file() && fpHookData_ == nullptr) {
106 PROFILER_LOG_ERROR(LOG_CORE, "If you need to save the file, please set the file_name");
107 }
108 PROFILER_LOG_INFO(LOG_CORE, "isHookStandaloneSerialize_ = %d", isHookStandaloneSerialize_);
109 #if defined(__arm__)
110 u64regs_.resize(PERF_REG_ARM_MAX);
111 #else
112 u64regs_.resize(PERF_REG_ARM64_MAX);
113 #endif
114 callFrames_.reserve(hookConfig_.max_stack_depth() + hookConfig_.max_js_stack_depth());
115 if (hookConfig_.fp_unwind() && hookConfig_.js_stack_report() > 0) {
116 fpJsCallStacks_.reserve(hookConfig_.max_js_stack_depth());
117 }
118 }
119
120 StackPreprocess::~StackPreprocess()
121 {
122 isStopTakeData_ = true;
123 if (dataRepeater_) {
124 dataRepeater_->Close();
125 }
126 if (thread_.joinable()) {
127 thread_.join();
128 }
129 runtime_instance = nullptr;
130 fpHookData_ = nullptr;
131 }
132
133 void StackPreprocess::FinishTraceFile()
134 {
135 if (isSaService_ && (writer_ != nullptr)) {
136 std::shared_ptr<TraceFileWriter> tfPtr = std::static_pointer_cast<TraceFileWriter>(writer_);
137 tfPtr->SetDurationTime();
138 tfPtr->Finish();
139 }
140 }
141
142 void StackPreprocess::SetWriter(const std::shared_ptr<Writer>& writer)
143 {
144 writer_ = writer;
145 if (!isSaService_) {
146 stackData_ = BatchNativeHookData();
147 }
148 }
149
150 void StackPreprocess::SetWriter(const WriterStructPtr& writer)
151 {
152 if (writer == nullptr) {
153 return;
154 }
155 resultWriter_ = writer;
156 auto ctx = resultWriter_->startReport(resultWriter_);
157 if (ctx == nullptr) {
158 PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
159 return;
160 }
161 stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
162 }
163
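// Spawns the demuxer thread that drains dataRepeater_; a weak_ptr is captured so the
// worker thread does not extend the lifetime of this StackPreprocess instance.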
164 bool StackPreprocess::StartTakeResults()
165 {
166 CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
167
168 std::weak_ptr<StackPreprocess> stackPreprocessPtr(shared_from_this());
169 std::thread demuxer([stackPreprocessPtr] {
170 if (auto ptr = stackPreprocessPtr.lock(); ptr != nullptr) {
171 ptr->TakeResults();
172 }
173 });
174 CHECK_TRUE(demuxer.get_id() != std::thread::id(), false, "demuxer thread invalid");
175
176 thread_ = std::move(demuxer);
177 isStopTakeData_ = false;
178 return true;
179 }
180
181 bool StackPreprocess::StopTakeResults()
182 {
183 PROFILER_LOG_INFO(LOG_CORE, "start StopTakeResults");
184 int32_t timerFd = scheduleTaskManager_.ScheduleTask(
185 std::bind(&StackPreprocess::ForceStop, this), WAIT_STOP_TIME, true);
186 if (timerFd == -1) {
187 PROFILER_LOG_ERROR(LOG_CORE, "StopTakeResults ScheduleTask failed!");
188 return false;
189 }
190 if (!dataRepeater_) {
191 while (!isStopTakeData_) {
192 std::this_thread::sleep_for(std::chrono::milliseconds(WAIT_TIME_ONCE));
193 }
194 return true;
195 }
196 CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
197 CHECK_TRUE(thread_.get_id() != std::thread::id(), false, "thread invalid");
198
199 PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join");
200
201 if (thread_.joinable()) {
202 thread_.join();
203 }
204 PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join success");
205 return true;
206 }
207
208 inline void StackPreprocess::IntervalFlushRecordStatistics()
209 {
210     // report statistics at each statistics_interval
211 if (hookConfig_.statistics_interval() > 0) {
212 auto currentTime = std::chrono::steady_clock::now();
213 auto elapsedTime = std::chrono::duration_cast<std::chrono::microseconds>(currentTime - lastStatisticsTime_);
214 if (elapsedTime >= statisticsInterval_) {
215 lastStatisticsTime_ = currentTime;
216 FlushRecordStatistics();
217 }
218 }
219 }
220
221 inline void StackPreprocess::IntervalFlushApplyAndReleaseMatchData()
222 {
223     // report apply/release match data at each malloc_free_matching_interval
224 if (hookConfig_.malloc_free_matching_interval() > 0) {
225 static auto lastStatisticsTime = std::chrono::steady_clock::now();
226 auto currentTime = std::chrono::steady_clock::now();
227 auto elapsedTime = std::chrono::duration_cast<std::chrono::seconds>(currentTime - lastStatisticsTime);
228 if (elapsedTime >= applyAndReleaseMatchInterval_) {
229 lastStatisticsTime = currentTime;
230 FlushRecordApplyAndReleaseMatchData();
231 }
232 }
233 }
234
235 bool StackPreprocess::HandleNoStackEvent(RawStackPtr& rawData)
236 {
237 if (rawData->stackConext->type == MMAP_FILE_TYPE) {
238 BaseStackRawData* mmapRawData = rawData->stackConext;
239 std::string filePath(reinterpret_cast<char *>(rawData->data));
240 COMMON::AdaptSandboxPath(filePath, rawData->stackConext->pid);
241 PROFILER_LOG_DEBUG(LOG_CORE, "MMAP_FILE_TYPE curMmapAddr=%p, MAP_FIXED=%d, "
242 "PROT_EXEC=%d, offset=%" PRIu64 ", filePath=%s",
243 mmapRawData->addr, mmapRawData->mmapArgs.flags & MAP_FIXED,
244 mmapRawData->mmapArgs.flags & PROT_EXEC, mmapRawData->mmapArgs.offset, filePath.data());
245 std::lock_guard<std::mutex> guard(mtx_);
246 runtime_instance->HandleMapInfo({reinterpret_cast<uint64_t>(mmapRawData->addr),
247 mmapRawData->mallocSize, mmapRawData->mmapArgs.flags, mmapRawData->mmapArgs.offset}, filePath,
248 rawData->stackConext->pid, rawData->stackConext->tid);
249 flushBasicData_ = true;
250 } else if (rawData->stackConext->type == THREAD_NAME_MSG) {
251 std::string threadName = reinterpret_cast<char*>(rawData->data);
252 ReportThreadNameMap(rawData->stackConext->tid, threadName);
253 } else {
254 return false;
255 }
256 return true;
257 }
258
259 void StackPreprocess::ForceStop()
260 {
261 isStopTakeData_ = true;
262 if (dataRepeater_ != nullptr) {
263 dataRepeater_->Close();
264 }
265 }
266
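// Consumes raw records directly from the shared memory block (SA path). A payload that is a
// bare uint64_t is treated as a freed address for statistics; END_MSG terminates the loop.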
267 void StackPreprocess::TakeResultsFromShmem(const std::shared_ptr<EventNotifier>& eventNotifier,
268 const std::shared_ptr<ShareMemoryBlock>& shareMemoryBlock)
269 {
270 eventNotifier->Take();
271 StackDataRepeater::RawStack rawStack;
272 RawStackPtr rawData(&rawStack, [](StackDataRepeater::RawStack* del) {});
273 while (!isStopTakeData_) {
274 bool ret = shareMemoryBlock->TakeData(
275 [&](const int8_t data[], uint32_t size) -> bool {
276 #ifdef PERFORMANCE_DEBUG
277 struct timespec start = {};
278 clock_gettime(CLOCK_REALTIME, &start);
279 #endif
280 if (size == sizeof(uint64_t)) {
281 uint64_t addr = *reinterpret_cast<uint64_t *>(const_cast<int8_t *>(data));
282 SetFreeStatisticsData(addr);
283 #ifdef PERFORMANCE_DEBUG
284 struct timespec end = {};
285 clock_gettime(CLOCK_REALTIME, &end);
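                // MAX_MATCH_CNT^3 == 10^9, i.e. nanoseconds per second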
286 uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
287 (end.tv_nsec - start.tv_nsec);
288 timeCost += curTimeCost;
289 unwindTimes++;
290 if (unwindTimes % LOG_PRINT_TIMES == 0) {
291 PROFILER_LOG_ERROR(LOG_CORE,
292 "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
293 unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
294 }
295 #endif
296 return true;
297 }
298 CHECK_TRUE(size >= sizeof(BaseStackRawData), false, "stack data invalid!");
299 rawData->stackConext = reinterpret_cast<BaseStackRawData *>(const_cast<int8_t *>(data));
300 rawData->data = reinterpret_cast<uint8_t*>(const_cast<int8_t *>(data)) + sizeof(BaseStackRawData);
301 rawData->fpDepth = (size - sizeof(BaseStackRawData)) / sizeof(uint64_t);
302 if (isStopTakeData_) {
303 return false;
304 } else if (rawData->stackConext->type == MEMORY_TAG) {
305 std::string tagName = reinterpret_cast<char*>(rawData->data);
306 SaveMemTag(rawData->stackConext->tagId, tagName);
307 return true;
308 } else if (HandleNoStackEvent(rawData)) {
309 return true;
310 } else if (rawData->stackConext->type == MUNMAP_MSG) {
311 std::lock_guard<std::mutex> guard(mtx_);
312 runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
313 } else if (rawData->stackConext->type == NMD_MSG) {
314 const char* nmdResult = reinterpret_cast<const char*>(rawData->data);
315 lseek(nmdFd_, 0, SEEK_END);
316 (void)write(nmdFd_, nmdResult, strlen(nmdResult));
317 return true;
318 } else if (rawData->stackConext->type == END_MSG) {
319 isStopTakeData_ = true;
320 return true;
321 }
322 {
323 std::lock_guard<std::mutex> guard(mtx_);
324 runtime_instance->UpdateThread(rawData->stackConext->pid, rawData->stackConext->tid);
325 }
326 ReportOfflineSymbolizationData();
327 std::visit([&](auto& stackData) {
328 SetHookData(rawData, stackData);
329 FlushCheck(stackData);
330 }, stackData_);
331 IntervalFlushRecordStatistics();
332 #ifdef PERFORMANCE_DEBUG
333 struct timespec end = {};
334 clock_gettime(CLOCK_REALTIME, &end);
335 uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
336 (end.tv_nsec - start.tv_nsec);
337 if (curTimeCost >= LONG_TIME_THRESHOLD) {
338 PROFILER_LOG_ERROR(LOG_CORE, "bigTimeCost %" PRIu64 " event=%d fpDepth=%u",
339 curTimeCost, rawData->stackConext->type, rawData->fpDepth);
340 }
341 timeCost += curTimeCost;
342 unwindTimes++;
343 if (unwindTimes % LOG_PRINT_TIMES == 0) {
344 PROFILER_LOG_ERROR(LOG_CORE, "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
345 unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
346 }
347 #endif
348 return true;
349 });
350 if (!ret) {
351 break;
352 }
353 }
354 }
355
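// Main consumer loop: takes batches of up to MAX_BATCH_CNT raw stacks from dataRepeater_,
// unwinds them (FP or DWARF), and routes the result to file output, statistics, or
// malloc/free matching serialization depending on the configuration.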
356 void StackPreprocess::TakeResults()
357 {
358 if (!dataRepeater_) {
359 return;
360 }
361
362 size_t minStackDepth = hookConfig_.max_stack_depth() > MIN_STACK_DEPTH
363 ? MIN_STACK_DEPTH : hookConfig_.max_stack_depth();
364 if (hookConfig_.blocked()) {
365 minStackDepth = static_cast<size_t>(hookConfig_.max_stack_depth());
366 }
367 minStackDepth += FILTER_STACK_DEPTH;
368 PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, start!", gettid());
369 while (1) {
370 RawStackPtr batchRawStack[MAX_BATCH_CNT] = {nullptr};
371 if (isStopTakeData_) {
372 break;
373 }
374 uint32_t during = 0;
375 if (hookConfig_.statistics_interval() > 0) {
376 auto currentTime = std::chrono::steady_clock::now();
377 auto timeDiff = std::chrono::duration_cast<std::chrono::milliseconds>(currentTime - lastStatisticsTime_);
378 int tempDuring =
379 std::chrono::duration_cast<std::chrono::milliseconds>(statisticsInterval_).count() - timeDiff.count();
380 during = tempDuring > 0 ? static_cast<uint32_t>(tempDuring) : 0;
381 }
382 bool isTimeOut = false;
383 auto result = dataRepeater_->TakeRawData(during, hookDataClockId_, MAX_BATCH_CNT, batchRawStack,
384 hookConfig_.statistics_interval(), isTimeOut);
385 if (hookConfig_.statistics_interval() > 0 && isTimeOut && result == nullptr) { // statistics mode
386 IntervalFlushRecordStatistics();
387 continue;
388 }
389 if (!result) {
390 break;
391 }
392 for (unsigned int i = 0; i < MAX_BATCH_CNT; i++) {
393 auto rawData = batchRawStack[i];
394 if (!rawData || isStopTakeData_) {
395 break;
396 }
397 if (rawData->baseStackData == nullptr) {
398 if (rawData->freeData) {
399 SetFreeStatisticsData(rawData->freeData);
400 }
401 continue;
402 }
403 if (rawData->stackConext == nullptr) {
404 PROFILER_LOG_ERROR(LOG_CORE, "StackPreprocess take results rawData->stackConext is nullptr");
405 continue;
406 }
407 if (rawData->stackConext->type == NMD_MSG) {
408 continue;
409 } else if (rawData->stackConext->type == END_MSG) {
410 isStopTakeData_ = true;
411 break;
412 }
413 #ifdef PERFORMANCE_DEBUG
414 struct timespec start = {};
415 clock_gettime(CLOCK_REALTIME, &start);
416 #endif
417 if (HandleNoStackEvent(rawData)) {
418 continue;
419 } else if (rawData->stackConext->type == MUNMAP_MSG) {
420 std::lock_guard<std::mutex> guard(mtx_);
421 runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
422 }
423
424 if (!rawData->reportFlag) {
425 ignoreCnts_++;
426 if (ignoreCnts_ % LOG_PRINT_TIMES == 0) {
427                 PROFILER_LOG_INFO(LOG_CORE, "ignoreCnts_ = %d queue size = %zu\n",
428 ignoreCnts_, dataRepeater_->Size());
429 }
430 continue;
431 }
432 eventCnts_++;
433 if (eventCnts_ % LOG_PRINT_TIMES == 0) {
434                 PROFILER_LOG_INFO(LOG_CORE, "eventCnts_ = %d queue size = %zu\n", eventCnts_, dataRepeater_->Size());
435 }
436 callFrames_.clear();
437 if (hookConfig_.fp_unwind()) {
438 FillFpNativeIp(rawData);
439 if (rawData->stackConext->jsChainId > 0 && rawData->jsStackData && hookConfig_.js_stack_report() > 0) {
440 FillFpJsData(rawData);
441 }
442 } else if (rawData->stackConext->type != PR_SET_VMA_MSG) {
443 if (rawData->stackSize == 0) {
444 FillDwarfErrorStack();
445 } else {
446 #if defined(__arm__)
447 uint32_t *regAddrArm = reinterpret_cast<uint32_t *>(rawData->data);
448 u64regs_.assign(regAddrArm, regAddrArm + PERF_REG_ARM_MAX);
449 #else
450 if (memcpy_s(u64regs_.data(), sizeof(uint64_t) * PERF_REG_ARM64_MAX, rawData->data,
451 sizeof(uint64_t) * PERF_REG_ARM64_MAX) != EOK) {
452 PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s regs failed");
453 }
454 #endif
455 }
456 }
457 #ifdef PERFORMANCE_DEBUG
458 size_t realFrameDepth = callFrames_.size();
459 #endif
460 size_t stackDepth = ((size_t)hookConfig_.max_stack_depth() > MAX_CALL_FRAME_UNWIND_SIZE)
461 ? MAX_CALL_FRAME_UNWIND_SIZE
462 : hookConfig_.max_stack_depth() + FILTER_STACK_DEPTH;
463 if (rawData->reduceStackFlag) {
464 stackDepth = minStackDepth;
465 }
466 if ((hookConfig_.fp_unwind()) || rawData->stackSize > 0) {
467 std::lock_guard<std::mutex> guard(mtx_);
468 if (rawData->stackConext->type != PR_SET_VMA_MSG) {
469 bool ret = runtime_instance->UnwindStack(u64regs_, rawData->stackData, rawData->stackSize,
470 rawData->stackConext->pid, rawData->stackConext->tid, callFrames_, stackDepth);
471 if (!ret) {
472 PROFILER_LOG_ERROR(LOG_CORE, "unwind fatal error");
473 continue;
474 }
475 }
476 }
477 if (((hookConfig_.fp_unwind()) || rawData->stackSize > 0) &&
478 (rawData->stackConext->type != PR_SET_VMA_MSG)) {
479 ReportOfflineSymbolizationData();
480 }
481 std::visit([&](auto& stackData) {
482 if (hookConfig_.save_file() && hookConfig_.file_name() != "" && isHookStandaloneSerialize_) {
483 SetHookData(rawData, callFrames_, stackData);
484 } else if (hookConfig_.save_file() && hookConfig_.file_name() != "") {
485 WriteFrames(rawData, callFrames_);
486 } else if (!hookConfig_.save_file()) {
487 if (hookConfig_.malloc_free_matching_interval() > 0) {
488 SetApplyAndReleaseMatchFrame(rawData, callFrames_, stackData);
489 } else {
490 SetHookData(rawData, callFrames_, stackData);
491 }
492 }
493 }, stackData_);
494
495 #ifdef PERFORMANCE_DEBUG
496 struct timespec end = {};
497 clock_gettime(CLOCK_REALTIME, &end);
498 uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
499 (end.tv_nsec - start.tv_nsec);
500 if (curTimeCost >= LONG_TIME_THRESHOLD) {
501 PROFILER_LOG_ERROR(LOG_CORE, "bigTimeCost %" PRIu64 " event=%d, realFrameDepth=%zu, "
502 "callFramesDepth=%zu\n",
503 curTimeCost, rawData->stackConext->type, realFrameDepth, callFrames_.size());
504 }
505 timeCost += curTimeCost;
506 unwindTimes++;
507 if (unwindTimes % LOG_PRINT_TIMES == 0) {
508 PROFILER_LOG_ERROR(LOG_CORE, "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
509 unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
510 }
511 #endif
512 } // for
513 for (unsigned int i = 0; i < MAX_BATCH_CNT; i++) {
514 if (!batchRawStack[i]) {
515 break;
516 }
517 dataRepeater_->ReturnRawStack(std::move(batchRawStack[i]));
518 }
519 if (hookConfig_.save_file() && hookConfig_.file_name() != "" && !isHookStandaloneSerialize_) {
520 continue;
521 }
522 if (hookConfig_.statistics_interval() == 0) {
523 std::visit([&](auto& stackData) {
524 FlushCheck(stackData);
525 }, stackData_);
526 }
527 IntervalFlushRecordStatistics();
528 IntervalFlushApplyAndReleaseMatchData();
529 } // while
530 PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, exit!", gettid());
531 }
532
533 inline void StackPreprocess::ReportThreadNameMap(uint32_t tid, const std::string& tname)
534 {
535 std::lock_guard<std::mutex> guard(mtx_);
536 auto it = threadNameMap_.find(tid);
537 if (it == threadNameMap_.end() || it->second != tname) {
538 threadNameMap_[tid] = tname;
539 std::visit([&](auto& stackData) {
540 auto hookData = stackData.add_events();
541 auto thread = hookData->mutable_thread_name_map();
542 thread->set_id(tid);
543 thread->set_name(tname);
544 thread->set_pid(pid_);
545 FlushCheck(stackData);
546 }, stackData_);
547 }
548 }
549
550 template <typename T>
551 inline void StackPreprocess::FillOfflineCallStack(std::vector<CallFrame>& callFrames, size_t idx, T& stackData)
552 {
553 for (; idx < callFrames.size(); ++idx) {
554 if (callFrames[idx].isJsFrame_) {
555 ReportFrameMap(callFrames[idx], stackData);
556 callStack_.push_back(callFrames[idx].callFrameId_ | JS_OFFLINE_IP_MASK);
557 continue;
558 }
559 callStack_.push_back(callFrames[idx].ip_);
560 }
561 }
562
563 template <typename T>
564 inline void StackPreprocess::FillCallStack(std::vector<CallFrame>& callFrames, size_t idx, T& stackData)
565 {
566 for (; idx < callFrames.size(); ++idx) {
567 ReportFrameMap(callFrames[idx], stackData);
568 // for call stack id
569 callStack_.push_back(callFrames[idx].callFrameId_);
570 }
571 }
572
573 inline uint32_t StackPreprocess::FindCallStackId(std::vector<uint64_t>& callStack)
574 {
575 auto itStack = callStackMap_.find(callStack);
576 if (itStack != callStackMap_.end()) {
577 return itStack->second;
578 }
579 return 0;
580 }
581
582 /**
583 * @return '0' is invalid stack id, '> 0' is valid stack id
584 */
585 template <typename T>
586 inline uint32_t StackPreprocess::SetCallStackMap(T& stackData)
587 {
588 uint32_t stackId = 0;
589 auto hookData = stackData.add_events();
590 auto stackmap = hookData->mutable_stack_map();
591 stackId = callStackMap_.size() + 1;
592 stackmap->set_id(stackId);
593     // offline symbolization uses ip; otherwise frame_map_id is used
594 if (hookConfig_.offline_symbolization()) {
595 if constexpr (std::is_same<T, ProtoEncoder::BatchNativeHookData>::value) {
596 stackmap->add_ip(callStack_);
597 } else {
598 for (size_t i = 0; i < callStack_.size(); i++) {
599 stackmap->add_ip(callStack_[i]);
600 }
601 }
602 } else {
603 if constexpr (std::is_same<T, ProtoEncoder::BatchNativeHookData>::value) {
604 stackmap->add_frame_map_id(callStack_);
605 } else {
606 for (size_t i = 0; i < callStack_.size(); i++) {
607 stackmap->add_frame_map_id(callStack_[i]);
608 }
609 }
610 }
611 stackmap->set_pid(pid_);
612 callStackMap_[callStack_] = stackId;
613 return stackId;
614 }
615
616 /**
617 * @return '0' is invalid stack id, '> 0' is valid stack id
618 */
619 template <typename T>
620 inline uint32_t StackPreprocess::GetCallStackId(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
621 T& stackData)
622 {
623     // ignore the first two frames when using dwarf unwind
624     size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
625     // if free_stack_report or munmap_stack_report is false, there is no need to record.
626 if ((rawStack->stackConext->type == FREE_MSG) && !hookConfig_.free_stack_report()) {
627 return 0;
628 } else if ((rawStack->stackConext->type == MUNMAP_MSG) && !hookConfig_.munmap_stack_report()) {
629 return 0;
630 }
631 callStack_.clear();
632 bool isNapi = false;
633 if (rawStack->stackConext->type == MEMORY_USING_MSG && hookConfig_.js_stack_report() == NAPI_CALL_STACK) {
634 std::string tagName;
635 GetMemTag(rawStack->stackConext->tagId, tagName);
636 if (tagName.find("napi") != std::string::npos) {
637 callStack_.reserve(callFrames.size() + 1); // 1 : insert a frame
638 if (!hookConfig_.offline_symbolization()) {
639 callStack_.push_back(DWARF_NAPI_CALLBACK + napiIndex_);
640 } else {
641 // just for offline symbolization
642 callStack_.push_back((DWARF_NAPI_CALLBACK + napiIndex_) | JS_OFFLINE_IP_MASK);
643 }
644 isNapi = true;
645 }
646 } else {
647 callStack_.reserve(callFrames.size());
648 }
649 if (!hookConfig_.offline_symbolization()) {
650 FillCallStack(callFrames, idx, stackData);
651 } else {
652 if ((!hookConfig_.fp_unwind()) && rawStack->stackSize == 0) {
653 idx = 0;
654 }
655 FillOfflineCallStack(callFrames, idx, stackData);
656 }
657 if (isNapi) {
658 // insert a frame
659 std::string tagName;
660 GetMemTag(rawStack->stackConext->tagId, tagName);
661 FillNapiStack(tagName, callFrames, napiIndex_);
662 ReportFrameMap(callFrames.back(), stackData);
663 ++napiIndex_;
664 }
665 // return call stack id
666 std::lock_guard<std::mutex> guard(mtx_);
667 uint32_t stackId = FindCallStackId(callStack_);
668 if (stackId > 0) {
669 return stackId;
670 } else {
671 return SetCallStackMap(stackData);
672 }
673 }
674
675 template <typename T>
676 void StackPreprocess::SetEventFrame(const ReportEventBaseData& rawStack,
677 T* event, uint32_t stackMapId, const std::string& type)
678 {
679 event->set_pid(pid_);
680 event->set_tid(rawStack.tid);
681 event->set_addr(rawStack.addr);
682 if constexpr (std::is_same<T, ::MmapEvent>::value || std::is_same<T, ProtoEncoder::MmapEvent>::value) {
683 event->set_type(type);
684 }
685
686 if constexpr (!std::is_same<T, ::FreeEvent>::value && !std::is_same<T, ProtoEncoder::FreeEvent>::value) {
687 auto size = static_cast<uint64_t>(rawStack.mallocSize);
688 #ifdef USE_JEMALLOC
689 if constexpr (std::is_same<T, ::AllocEvent>::value || std::is_same<T, ProtoEncoder::AllocEvent>::value) {
690 size = static_cast<uint64_t>(ComputeAlign(size));
691 }
692 #endif
693 event->set_size(size);
694 }
695 if (hookConfig_.callframe_compress() && stackMapId != 0) {
696 event->set_thread_name_id(rawStack.tid);
697 event->set_stack_id(stackMapId);
698 }
699 event->set_thread_name_id(rawStack.tid);
700 }
701
702 template <typename T>
703 void StackPreprocess::SetEventFrame(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
704 T* event, uint32_t stackMapId, const std::string& type)
705 {
706     // ignore the first two frames when using dwarf unwind
707 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
708 event->set_pid(rawStack->stackConext->pid);
709 event->set_tid(rawStack->stackConext->tid);
710 event->set_addr((uint64_t)rawStack->stackConext->addr);
711
712 if constexpr (std::is_same<T, ::MmapEvent>::value || std::is_same<T, ProtoEncoder::MmapEvent>::value) {
713 event->set_type(type);
714 }
715
716 if constexpr (!std::is_same<T, ::FreeEvent>::value && !std::is_same<T, ProtoEncoder::FreeEvent>::value) {
717 auto size = static_cast<uint64_t>(rawStack->stackConext->mallocSize);
718 #ifdef USE_JEMALLOC
719 if constexpr (std::is_same<T, ::AllocEvent>::value || std::is_same<T, ProtoEncoder::AllocEvent>::value) {
720 size = static_cast<uint64_t>(ComputeAlign(size));
721 }
722 #endif
723 event->set_size(size);
724 }
725
726 if (hookConfig_.callframe_compress() && stackMapId != 0) {
727 event->set_thread_name_id(rawStack->stackConext->tid);
728 event->set_stack_id(stackMapId);
729 } else {
730 for (; idx < callFrames.size(); ++idx) {
731 auto frame = event->add_frame_info();
732 SetFrameInfo(*frame, callFrames[idx]);
733 }
734 event->set_thread_name_id(rawStack->stackConext->tid);
735 }
736 }
737
738 void StackPreprocess::FillNapiStack(std::string& tagName, std::vector<CallFrame>& callFrames, uint64_t napiIndex)
739 {
740 CallFrame& jsCallFrame = callFrames_.emplace_back(0);
741 jsCallFrame.symbolName_ = tagName;
742 jsCallFrame.isJsFrame_ = true;
743 jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
744 jsCallFrame.needReport_ |= SYMBOL_NAME_ID_REPORT;
745 jsCallFrame.needReport_ |= FILE_PATH_ID_REPORT;
746 jsCallFrame.callFrameId_ = DWARF_NAPI_CALLBACK + napiIndex;
747 jsCallFrame.symbolNameId_ = DWARF_NAPI_CALLBACK + napiIndex;
748 jsCallFrame.filePathId_ = DWARF_NAPI_CALLBACK + napiIndex;
749 jsCallFrame.filePath_ = "no-napi-file-path";
750 }
751
752 template <typename T>
753 void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack, std::vector<CallFrame>& callFrames,
754 T& stackData)
755 {
756     // ignore the first two frames when using dwarf unwind
757 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
758 callStack_.clear();
759 bool isNapi = false;
760 if (hookConfig_.js_stack_report() == NAPI_CALL_STACK) {
761 std::string tagName;
762 GetMemTag(rawStack->stackConext->tagId, tagName);
763 if (tagName.find("napi") != std::string::npos) {
764 callStack_.reserve(callFrames.size() + FRAME_DEPTH); // insert a frame
765 if (!hookConfig_.offline_symbolization()) {
766 callStack_.push_back(DWARF_NAPI_CALLBACK + napiIndex_);
767 } else {
768 // just for offline symbolization
769 callStack_.push_back((DWARF_NAPI_CALLBACK + napiIndex_) | JS_OFFLINE_IP_MASK);
770 }
771 isNapi = true;
772 }
773 } else {
774 callStack_.reserve(callFrames.size() + 1);
775 }
776 callStack_.push_back(rawStack->stackConext->mallocSize | SIZE_MASK);
777 if (!hookConfig_.offline_symbolization()) {
778 FillCallStack(callFrames, idx, stackData);
779 } else {
780 FillOfflineCallStack(callFrames, idx, stackData);
781 }
782 // insert a frame
783 if (isNapi) {
784 std::string tagName;
785 GetMemTag(rawStack->stackConext->tagId, tagName);
786 FillNapiStack(tagName, callFrames, napiIndex_);
787 ReportFrameMap(callFrames.back(), stackData);
788 ++napiIndex_;
789 }
790 std::lock_guard<std::mutex> guard(mtx_);
791     // set the alloc statistics data keyed by call stack id.
792 uint32_t stackId = FindCallStackId(callStack_);
793 if (stackId > 0) {
794 SetAllocStatisticsData(rawStack, stackId, true);
795 } else {
796 stackId = SetCallStackMap(stackData);
797 statisticsModelFlushCallstack_ = true;
798 SetAllocStatisticsData(rawStack, stackId);
799 }
800 }
801
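// FP-unwind statistics fast path: the call-stack key is the allocation size
// (tagged with SIZE_MASK) followed by the raw frame-pointer ips copied from shared memory.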
802 template <typename T>
803 void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack, T& stackData)
804 {
805 callStack_.resize(rawStack->fpDepth + 1);
806 callStack_[0] = (rawStack->stackConext->mallocSize | SIZE_MASK);
807 if (memcpy_s(callStack_.data() + 1, sizeof(uint64_t) * rawStack->fpDepth,
808 rawStack->data, sizeof(uint64_t) * rawStack->fpDepth) != EOK) {
809 PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s callStack_ failed");
810 return;
811 }
812 std::lock_guard<std::mutex> guard(mtx_);
813     // set the alloc statistics data keyed by call stack id.
814 uint32_t stackId = FindCallStackId(callStack_);
815 if (stackId > 0) {
816 SetAllocStatisticsData(rawStack, stackId, true);
817 } else {
818 stackId = SetCallStackMap(stackData);
819 statisticsModelFlushCallstack_ = true;
820 SetAllocStatisticsData(rawStack, stackId);
821 }
822 }
823
824 template <typename T>
825 void StackPreprocess::SetHookData(RawStackPtr rawStack, T& stackData)
826 {
827 if (hookConfig_.statistics_interval() > 0) {
828         // statistical reporting must be compressed and accurate.
829 switch (rawStack->stackConext->type) {
830 case FREE_MSG:
831 case MUNMAP_MSG:
832 case MEMORY_UNUSING_MSG: {
833 SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
834 break;
835 }
836 case MALLOC_MSG:
837 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
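                // no break: malloc falls through to the common allocation-statistics handling below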
838 case MMAP_MSG:
839 case MMAP_FILE_PAGE_MSG:
840 case MEMORY_USING_MSG: {
841 SetAllocStatisticsFrame(rawStack, stackData);
842 break;
843 }
844 case PR_SET_VMA_MSG: {
845 break;
846 }
847 default: {
848 PROFILER_LOG_ERROR(LOG_CORE, "statistics event type: error");
849 break;
850 }
851 }
852 return;
853 }
854 }
855
856 void StackPreprocess::ReportOfflineSymbolizationData()
857 {
858 if (hookConfig_.offline_symbolization() && flushBasicData_) {
859 SetMapsInfo();
860 flushBasicData_ = false;
861 }
862 }
863
864 template <typename T>
865 void StackPreprocess::SetApplyAndReleaseMatchFrame(RawStackPtr rawStack, std::vector<CallFrame>& callFrames,
866 T& stackData)
867 {
868 uint32_t stackMapId = 0;
869 if (rawStack->stackConext->type != PR_SET_VMA_MSG) {
870 stackMapId = GetCallStackId(rawStack, callFrames, stackData);
871 } else {
872 rawStack->stackConext->tagId = prctlPeriodTags_.size();
873 prctlPeriodTags_.emplace_back(reinterpret_cast<char*>(rawStack->data));
874 applyAndReleaseMatchPeriodListData_.emplace_back(rawStack->stackConext);
875 }
876 if (rawStack->stackConext->type == MALLOC_MSG) {
877 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
878 } else if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
879 return;
880 }
881 uint64_t addr = reinterpret_cast<uint64_t>(rawStack->stackConext->addr);
882 auto iter = applyAndReleaseMatchIntervallMap_.find(addr);
883 if (iter != applyAndReleaseMatchIntervallMap_.end()) {
884 applyAndReleaseMatchPeriodListData_.erase(iter->second);
885 applyAndReleaseMatchIntervallMap_.erase(addr);
886 } else {
887 applyAndReleaseMatchPeriodListData_.emplace_back(rawStack->stackConext, stackMapId);
888 applyAndReleaseMatchIntervallMap_.emplace(addr, std::prev(applyAndReleaseMatchPeriodListData_.end()));
889 }
890 }
891
892 template <typename T>
893 void StackPreprocess::SetHookData(RawStackPtr rawStack, std::vector<CallFrame>& callFrames, T& stackData)
894 {
895     // statistical reporting must be compressed and accurate.
896 if (hookConfig_.statistics_interval() > 0) {
897 switch (rawStack->stackConext->type) {
898 case FREE_MSG:
899 case MUNMAP_MSG:
900 case MEMORY_UNUSING_MSG: {
901 SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
902 break;
903 }
904 case MALLOC_MSG:
905 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
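                // no break: malloc falls through to the common allocation-statistics handling below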
906 case MMAP_MSG:
907 case MMAP_FILE_PAGE_MSG:
908 case MEMORY_USING_MSG: {
909 SetAllocStatisticsFrame(rawStack, callFrames, stackData);
910 break;
911 }
912 case PR_SET_VMA_MSG: {
913 break;
914 }
915 default: {
916 PROFILER_LOG_ERROR(LOG_CORE, "statistics event type:%d error", rawStack->stackConext->type);
917 break;
918 }
919 }
920 return;
921 }
922
923 uint32_t stackMapId = 0;
924 if (hookConfig_.callframe_compress() &&
925 !(rawStack->stackConext->type == MEMORY_TAG || rawStack->stackConext->type == PR_SET_VMA_MSG)) {
926 stackMapId = GetCallStackId(rawStack, callFrames, stackData);
927 }
928
929 if ((!hookConfig_.callframe_compress() || stackMapId == 0) && hookConfig_.string_compressed()) {
930 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
931 for (; idx < callFrames.size(); ++idx) {
932 ReportSymbolNameMap(callFrames[idx], stackData);
933 ReportFilePathMap(callFrames[idx], stackData);
934 }
935 }
936
937 auto hookData = stackData.add_events();
938 hookData->set_tv_sec(rawStack->stackConext->ts.tv_sec);
939 hookData->set_tv_nsec(rawStack->stackConext->ts.tv_nsec);
940
941 if (rawStack->stackConext->type == MALLOC_MSG) {
942 auto allocEvent = hookData->mutable_alloc_event();
943 SetEventFrame(rawStack, callFrames, allocEvent, stackMapId);
944 } else if (rawStack->stackConext->type == FREE_MSG) {
945 auto freeEvent = hookData->mutable_free_event();
946 SetEventFrame(rawStack, callFrames, freeEvent, stackMapId);
947 } else if (rawStack->stackConext->type == MMAP_MSG) {
948 auto mmapEvent = hookData->mutable_mmap_event();
949 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId);
950 } else if (rawStack->stackConext->type == MMAP_FILE_PAGE_MSG) {
951 auto mmapEvent = hookData->mutable_mmap_event();
952 const std::string prefix = "FilePage:";
953 std::string tagName;
954 if (GetMemTag(rawStack->stackConext->tagId, tagName)) {
955 tagName = prefix + tagName;
956 }
957 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId, tagName);
958 } else if (rawStack->stackConext->type == MUNMAP_MSG) {
959 auto munmapEvent = hookData->mutable_munmap_event();
960 SetEventFrame(rawStack, callFrames, munmapEvent, stackMapId);
961 } else if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
962 auto tagEvent = hookData->mutable_tag_event();
963 const std::string prefix = "Anonymous:";
964 std::string tagName(reinterpret_cast<char*>(rawStack->data));
965 tagEvent->set_addr((uint64_t)rawStack->stackConext->addr);
966 tagEvent->set_size(rawStack->stackConext->mallocSize);
967 tagEvent->set_tag(prefix + tagName);
968 tagEvent->set_pid(pid_);
969 } else if (rawStack->stackConext->type == MEMORY_USING_MSG) {
970 auto mmapEvent = hookData->mutable_mmap_event();
971 std::string tagName;
972 GetMemTag(rawStack->stackConext->tagId, tagName);
973 SetEventFrame(rawStack, callFrames, mmapEvent, stackMapId, tagName);
974 } else if (rawStack->stackConext->type == MEMORY_UNUSING_MSG) {
975 auto munmapEvent = hookData->mutable_munmap_event();
976 SetEventFrame(rawStack, callFrames, munmapEvent, stackMapId);
977 }
978 }
979
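// Matches a released address against allocAddrMap_ and accumulates release count/size
// on the owning statistics record for the current reporting period.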
980 inline bool StackPreprocess::SetFreeStatisticsData(uint64_t addr)
981 {
982     // look up the statistics record by address
983 auto addrIter = allocAddrMap_.find(addr);
984 if (addrIter != allocAddrMap_.end()) {
985 auto& record = addrIter->second.second;
986 ++record->releaseCount;
987 record->releaseSize += addrIter->second.first;
988 statisticsPeriodData_[record->callstackId] = record;
989 allocAddrMap_.erase(addr);
990 return true;
991 }
992 return false;
993 }
994
995 inline void StackPreprocess::SetAllocStatisticsData(const RawStackPtr& rawStack, size_t stackId, bool isExists)
996 {
997     // if the record exists, update it; otherwise add a new one
998 if (isExists) {
999 auto recordIter = recordStatisticsMap_.find(stackId);
1000 if (recordIter != recordStatisticsMap_.end()) {
1001 auto& record = recordIter->second;
1002 ++record.applyCount;
1003 record.applySize += rawStack->stackConext->mallocSize;
1004 allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
1005 std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
1006 statisticsPeriodData_[stackId] = &recordIter->second;
1007 }
1008 } else {
1009 RecordStatistic record;
1010 record.pid = rawStack->stackConext->pid;
1011 record.callstackId = stackId;
1012 record.applyCount = 1;
1013 record.applySize = rawStack->stackConext->mallocSize;
1014 switch (rawStack->stackConext->type) {
1015 case MALLOC_MSG: {
1016 record.type = RecordStatisticsEvent::MALLOC;
1017 break;
1018 }
1019 case MMAP_MSG: {
1020 record.type = RecordStatisticsEvent::MMAP;
1021 break;
1022 }
1023 case MMAP_FILE_PAGE_MSG: {
1024 record.type = RecordStatisticsEvent::FILE_PAGE_MSG;
1025 break;
1026 }
1027 case MEMORY_USING_MSG: {
1028 record.type = RecordStatisticsEvent::MEMORY_USING_MSG;
1029 record.tagId = rawStack->stackConext->tagId;
1030 break;
1031 }
1032 default: {
1033 PROFILER_LOG_ERROR(LOG_CORE, "SetAllocStatisticsData event type error");
1034 break;
1035 }
1036 }
1037
1038 auto [recordIter, stat] = recordStatisticsMap_.emplace(stackId, record);
1039 allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
1040 std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
1041 statisticsPeriodData_[stackId] = &recordIter->second;
1042 }
1043 }
1044
1045 void StackPreprocess::WriteFrames(RawStackPtr rawStack, const std::vector<CallFrame>& callFrames)
1046 {
1047 CHECK_TRUE(fpHookData_ != nullptr, NO_RETVAL, "fpHookData_ is nullptr, please check file_name(%s)",
1048 hookConfig_.file_name().c_str());
1049 if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
1050 const std::string prefix = "Anonymous:";
1051 std::string tagName(reinterpret_cast<char*>(rawStack->data));
1052 fprintf(fpHookData_, "prctl;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ":tag:%s\n",
1053 rawStack->stackConext->pid, rawStack->stackConext->tid,
1054 (int64_t)rawStack->stackConext->ts.tv_sec, rawStack->stackConext->ts.tv_nsec,
1055 (uint64_t)rawStack->stackConext->addr, (prefix + tagName).c_str());
1056 return;
1057 }
1058 std::string tag = "";
1059 switch (rawStack->stackConext->type) {
1060 case FREE_MSG:
1061 tag = "free";
1062 break;
1063 case MALLOC_MSG:
1064 tag = "malloc";
1065 break;
1066 case MMAP_MSG:
1067 tag = "mmap";
1068 break;
1069 case MUNMAP_MSG:
1070 tag = "munmap";
1071 break;
1072 default:
1073 break;
1074 }
1075
1076 fprintf(fpHookData_, "%s;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ";%zu\n", tag.c_str(),
1077 rawStack->stackConext->pid, rawStack->stackConext->tid, (int64_t)rawStack->stackConext->ts.tv_sec,
1078 rawStack->stackConext->ts.tv_nsec, (uint64_t)rawStack->stackConext->addr, rawStack->stackConext->mallocSize);
1079 size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
1080 for (; idx < callFrames.size(); ++idx) {
1081 (void)fprintf(fpHookData_, "0x%" PRIx64 ";0x%" PRIx64 ";%s;%s;0x%" PRIx64 ";%" PRIu64 "\n",
1082 callFrames[idx].ip_, callFrames[idx].sp_, std::string(callFrames[idx].symbolName_).c_str(),
1083 std::string(callFrames[idx].filePath_).c_str(), callFrames[idx].offset_, callFrames[idx].symbolOffset_);
1084 }
1085 }
1086
1087 template <typename T>
1088 inline void StackPreprocess::SetFrameInfo(T& frame, CallFrame& callFrame)
1089 {
1090 frame.set_ip(callFrame.ip_);
1091 if (hookConfig_.offline_symbolization()) {
1092         // when JS frames are mixed with offline symbolization, the JS call stack is reported via online symbolization
1093 if (callFrame.isJsFrame_ && callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0) {
1094 frame.set_sp(callFrame.sp_);
1095 frame.set_offset(callFrame.offset_);
1096 frame.set_symbol_offset(callFrame.symbolOffset_);
1097 frame.set_symbol_name_id(callFrame.symbolNameId_);
1098 frame.set_file_path_id(callFrame.filePathId_);
1099 }
1100 return;
1101 }
1102 frame.set_sp(callFrame.sp_);
1103 if (!(callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0)) {
1104 frame.set_symbol_name(std::string(callFrame.symbolName_));
1105 frame.set_file_path(std::string(callFrame.filePath_));
1106 }
1107 frame.set_offset(callFrame.offset_);
1108 frame.set_symbol_offset(callFrame.symbolOffset_);
1109 if (callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0) {
1110 frame.set_symbol_name_id(callFrame.symbolNameId_);
1111 frame.set_file_path_id(callFrame.filePathId_);
1112 }
1113 }
1114
1115 template <typename T>
1116 inline void StackPreprocess::ReportSymbolNameMap(CallFrame& callFrame, T& stackData)
1117 {
1118 if (callFrame.needReport_ & SYMBOL_NAME_ID_REPORT) {
1119 auto hookData = stackData.add_events();
1120 auto symbolMap = hookData->mutable_symbol_name();
1121 symbolMap->set_id(callFrame.symbolNameId_);
1122 symbolMap->set_name(std::string(callFrame.symbolName_));
1123 symbolMap->set_pid(pid_);
1124 }
1125 }
1126
1127 template <typename T>
1128 inline void StackPreprocess::ReportFilePathMap(CallFrame& callFrame, T& stackData)
1129 {
1130 if (callFrame.needReport_ & FILE_PATH_ID_REPORT) {
1131 auto hookData = stackData.add_events();
1132 auto filePathMap = hookData->mutable_file_path();
1133 filePathMap->set_id(callFrame.filePathId_);
1134 filePathMap->set_name(std::string(callFrame.filePath_));
1135 filePathMap->set_pid(pid_);
1136 }
1137 }
1138
1139 template <typename T>
1140 inline void StackPreprocess::ReportFrameMap(CallFrame& callFrame, T& stackData)
1141 {
1142 if (callFrame.needReport_ & CALL_FRAME_REPORT) {
1143 if ((!hookConfig_.fp_unwind()) && callFrame.callFrameId_ == DWARF_ERROR_ID && !unwindFailReport_) {
1144 return;
1145 } else if ((!hookConfig_.fp_unwind()) && callFrame.callFrameId_ == DWARF_ERROR_ID && unwindFailReport_) {
1146 unwindFailReport_ = false;
1147 }
1148 ReportSymbolNameMap(callFrame, stackData);
1149 ReportFilePathMap(callFrame, stackData);
1150 auto hookData = stackData.add_events();
1151 auto frameMap = hookData->mutable_frame_map();
1152 frameMap->set_id(callFrame.callFrameId_);
1153 auto frame = frameMap->mutable_frame();
1154 SetFrameInfo(*frame, callFrame);
1155 frameMap->set_pid(pid_);
1156 }
1157 }
1158
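// Serializes the file paths, ELF symbol tables and executable mappings gathered by the
// runtime for offline symbolization, then clears the pending offline maps.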
1159 void StackPreprocess::SetMapsInfo()
1160 {
1161 std::lock_guard<std::mutex> guard(mtx_);
1162 for (auto& itemSoBegin : runtime_instance->GetOfflineMaps()) {
1163 auto& maps = runtime_instance->GetMapsCache();
1164 auto mapsIter = maps.find(itemSoBegin);
1165 if (mapsIter == maps.end()) {
1166 continue;
1167 }
1168
1169 ElfSymbolTable symbolInfo;
1170 auto& curMemMaps = mapsIter->second;
1171 GetSymbols(curMemMaps->name_, symbolInfo);
1172 if (symbolInfo.symEntSize == 0) {
1173 continue;
1174 }
1175 std::visit([&](auto& stackData) {
1176 auto hookData = stackData.add_events();
1177 auto filepathMap = hookData->mutable_file_path();
1178 filepathMap->set_id(curMemMaps->filePathId_);
1179 filepathMap->set_name(curMemMaps->name_);
1180 filepathMap->set_pid(pid_);
1181 SetSymbolInfo(curMemMaps->filePathId_, symbolInfo, stackData);
1182
1183 for (auto& map : curMemMaps->GetMaps()) {
1184 if (map->prots & PROT_EXEC) {
1185 auto nativeHookData = stackData.add_events();
1186 auto mapSerialize = nativeHookData->mutable_maps_info();
1187 mapSerialize->set_pid(pid_);
1188 mapSerialize->set_start(map->begin);
1189 mapSerialize->set_end(map->end);
1190 mapSerialize->set_offset(map->offset);
1191 mapSerialize->set_file_path_id(curMemMaps->filePathId_);
1192 }
1193 }
1194 FlushData(stackData);
1195 }, stackData_);
1196 }
1197 runtime_instance->ClearOfflineMaps();
1198 }
1199
1200 template <typename T>
1201 void StackPreprocess::SetSymbolInfo(uint32_t filePathId, ElfSymbolTable& symbolInfo, T& batchNativeHookData)
1202 {
1203 if (symbolInfo.symEntSize == 0) {
1204 PROFILER_LOG_ERROR(LOG_CORE, "SetSymbolInfo get symbolInfo failed");
1205 return;
1206 }
1207 auto hookData = batchNativeHookData.add_events();
1208 auto symTable = hookData->mutable_symbol_tab();
1209 symTable->set_file_path_id(filePathId);
1210 symTable->set_text_exec_vaddr(symbolInfo.textVaddr);
1211 symTable->set_text_exec_vaddr_file_offset(symbolInfo.textOffset);
1212 symTable->set_sym_entry_size(symbolInfo.symEntSize);
1213 symTable->set_sym_table(symbolInfo.symTable.data(), symbolInfo.symTable.size());
1214 symTable->set_str_table(symbolInfo.strTable.data(), symbolInfo.strTable.size());
1215 symTable->set_pid(pid_);
1216 }
1217
1218 template <typename T>
1219 void StackPreprocess::FlushCheck(T& stackData)
1220 {
1221 if (hookConfig_.statistics_interval() > 0) {
1222 if (!statisticsModelFlushCallstack_) {
1223 return;
1224 }
1225 if constexpr (std::is_same<T, ::BatchNativeHookData>::value) {
1226 FlushData(stackData);
1227 } else {
1228 uint64_t dataLen = static_cast<uint64_t>(stackData.Size());
1229 if (dataLen > flushSize_) {
1230 FlushData(stackData);
1231 }
1232 }
1233 statisticsModelFlushCallstack_ = false;
1234 } else {
1235 FlushData(stackData);
1236 }
1237 }
1238
1239 void StackPreprocess::FlushData(BatchNativeHookData& stackData)
1240 {
1241 if (buffer_ == nullptr) {
1242 return;
1243 }
1244 if (stackData.events().size() > 0) {
1245 size_t length = stackData.ByteSizeLong();
1246 stackData.SerializeToArray(buffer_.get(), length);
1247 if (length < bufferSize_) {
1248 if (isHookStandaloneSerialize_) {
1249 std::string str;
1250 ForStandard::BatchNativeHookData StandardStackData;
1251 StandardStackData.ParseFromArray(buffer_.get(), length);
1252 google::protobuf::TextFormat::PrintToString(StandardStackData, &str);
1253 size_t n = fwrite(str.data(), 1, str.size(), fpHookData_);
1254 fflush(fpHookData_);
1255 std::get<::BatchNativeHookData>(stackData_).clear_events();
1256 PROFILER_LOG_DEBUG(LOG_CORE, "Flush Data fwrite n = %zu str.size() = %zu", n, str.size());
1257 } else {
1258 Flush(buffer_.get(), length);
1259 }
1260 } else {
1261 PROFILER_LOG_ERROR(LOG_CORE, "the data is larger than MAX_BUFFER_SIZE, flush failed");
1262 }
1263 }
1264 }
1265
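// Finishes the current ProtoEncoder message and starts a new one. The normal plugin path
// reports through resultWriter_; the SA service path reports via profilerPluginData_ and
// refreshes the trace file header once dataFlushSize_ exceeds FLUSH_BASELINE_SA.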
1266 void StackPreprocess::FlushData(ProtoEncoder::BatchNativeHookData& stackData)
1267 {
1268 if (stackData.Size() == 0) {
1269 return;
1270 }
1271
1272 int messageLen = stackData.Finish();
1273 RandomWriteCtx* ctx = nullptr;
1274 if ((!isSaService_) && (resultWriter_ != nullptr)) {
1275 resultWriter_->finishReport(resultWriter_, messageLen);
1276 resultWriter_->flush(resultWriter_);
1277 ctx = resultWriter_->startReport(resultWriter_);
1278 } else {
1279 profilerPluginData_.finishAdd_data(messageLen);
1280 int32_t mesgLen = FinishReport();
1281 dataFlushSize_ += static_cast<uint32_t>(mesgLen);
1282 if (dataFlushSize_ >= FLUSH_BASELINE_SA && writer_ != nullptr) {
1283 std::shared_ptr<TraceFileWriter> tfPtr = std::static_pointer_cast<TraceFileWriter>(writer_);
1284 tfPtr->UpdateSaFileHeader();
1285 dataFlushSize_ = 0;
1286 }
1287 ctx = StartReport();
1288 }
1289
1290 if (ctx == nullptr) {
1291 PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
1292 return;
1293 }
1294 stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
1295 }
1296
1297 void StackPreprocess::Flush(const uint8_t* src, size_t size)
1298 {
1299 if (src == nullptr) {
1300 PROFILER_LOG_ERROR(LOG_CORE, "Flush src is nullptr");
1301 return;
1302 }
1303
1304 if (writer_ == nullptr) {
1305 PROFILER_LOG_ERROR(LOG_CORE, "Flush writer_ is nullptr");
1306 return;
1307 }
1308 writer_->Write(src, size);
1309 writer_->Flush();
1310
1311 std::get<::BatchNativeHookData>(stackData_).clear_events();
1312 }
1313
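// Extracts the symbol and string tables (.symtab/.strtab, falling back to .dynsym/.dynstr)
// from the ELF file for offline symbolization.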
1314 void StackPreprocess::GetSymbols(const std::string& filePath, ElfSymbolTable& symbols)
1315 {
1316 std::shared_ptr<DfxElf> elfPtr = std::make_shared<DfxElf>(filePath);
1317
1318 symbols.textVaddr = elfPtr->GetStartVaddr();
1319 symbols.textOffset = elfPtr->GetStartOffset();
1320 if (symbols.textVaddr == (std::numeric_limits<uint64_t>::max)()) {
1321 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get textVaddr failed");
1322 return;
1323 }
1324
1325 std::string symSecName;
1326 std::string strSecName;
1327 ShdrInfo shdr;
1328 if (elfPtr->GetSectionInfo(shdr, ".symtab")) {
1329 symSecName = ".symtab";
1330 strSecName = ".strtab";
1331 } else if (elfPtr->GetSectionInfo(shdr, ".dynsym")) {
1332 symSecName = ".dynsym";
1333 strSecName = ".dynstr";
1334 } else {
1335 return;
1336 }
1337 symbols.symEntSize = shdr.entSize;
1338 symbols.symTable.resize(shdr.size);
1339 if (!elfPtr->GetSectionData(symbols.symTable.data(), shdr.size, symSecName)) {
1340 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get symbol section data failed");
1341 return;
1342 }
1343 if (!elfPtr->GetSectionInfo(shdr, strSecName)) {
1344 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
1345 return;
1346 }
1347 symbols.strTable.resize(shdr.size);
1348 if (!elfPtr->GetSectionData(symbols.strTable.data(), shdr.size, strSecName)) {
1349 PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
1350 return;
1351 }
1352 }
1353
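// Emits one statistics_event per call stack touched in this interval, stamped with the
// current hook clock time, then clears statisticsPeriodData_.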
1354 bool StackPreprocess::FlushRecordStatistics()
1355 {
1356 if (statisticsPeriodData_.empty()) {
1357 return false;
1358 }
1359 std::visit([&](auto& stackData) {
1360 FlushData(stackData);
1361 }, stackData_);
1362 std::visit([&](auto& stackData) {
1363 struct timespec ts;
1364 clock_gettime(hookDataClockId_, &ts);
1365 for (auto [addr, statistics] : statisticsPeriodData_) {
1366 auto hookData = stackData.add_events();
1367 hookData->set_tv_sec(ts.tv_sec);
1368 hookData->set_tv_nsec(ts.tv_nsec);
1369 auto recordEvent = hookData->mutable_statistics_event();
1370 recordEvent->set_pid(statistics->pid);
1371 recordEvent->set_callstack_id(statistics->callstackId);
1372 recordEvent->set_type(statistics->type);
1373 recordEvent->set_apply_count(statistics->applyCount);
1374 recordEvent->set_release_count(statistics->releaseCount);
1375 recordEvent->set_apply_size(statistics->applySize);
1376 recordEvent->set_release_size(statistics->releaseSize);
1377 }
1378 FlushData(stackData);
1379 }, stackData_);
1380 statisticsPeriodData_.clear();
1381 return true;
1382 }
1383
1384 void StackPreprocess::SaveMemTag(uint32_t tagId, const std::string& tagName)
1385 {
1386 std::string temp;
1387 bool res = memTagMap_.Find(tagId, temp);
1388 if (!res) {
1389 memTagMap_.EnsureInsert(tagId, tagName);
1390 }
1391 }
1392
1393 bool StackPreprocess::GetMemTag(uint32_t tagId, std::string& tagName)
1394 {
1395 return memTagMap_.Find(tagId, tagName);
1396 }
1397
1398 void StackPreprocess::SaveJsRawStack(uint64_t jsChainId, const char* jsRawStack)
1399 {
1400 auto iterChainId = jsStackMap_.find(jsChainId);
1401 if (iterChainId == jsStackMap_.end()) {
1402 auto iterRawStack = jsStackSet_.find(jsRawStack);
1403 if (iterRawStack == jsStackSet_.end()) {
1404 auto iter = jsStackSet_.insert(jsRawStack);
1405 jsStackMap_[jsChainId] = iter.first->c_str();
1406 } else {
1407 jsStackMap_[jsChainId] = iterRawStack->c_str();
1408 }
1409 }
1410 }
1411
1412 const char* StackPreprocess::GetJsRawStack(uint64_t jsChainId)
1413 {
1414 auto iter = jsStackMap_.find(jsChainId);
1415 if (iter != jsStackMap_.end()) {
1416 return iter->second;
1417 }
1418 return nullptr;
1419 }
1420
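// Computes floor(log2(val)) by smearing the highest set bit downwards, rounding up to the
// next power of two and using ffsl to locate it; e.g. LgFloor(199) == 7.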
1421 unsigned StackPreprocess::LgFloor(unsigned long val)
1422 {
1423 val |= (val >> RIGHT_MOVE_1);
1424 val |= (val >> RIGHT_MOVE_2);
1425 val |= (val >> RIGHT_MOVE_4);
1426 val |= (val >> RIGHT_MOVE_8);
1427 val |= (val >> RIGHT_MOVE_16);
1428     if (sizeof(val) > 4) { // on a 64-bit value, also smear the upper 32 bits
1429         int constant = sizeof(val) * 4; // sizeof(val) * 4 == 32 for a 64-bit value
1430         val |= (val >> constant);
1431     }
1432     val++;
1433     if (val == 0) {
1434         return 8 * sizeof(val) - 1; // overflow: the top bit was already set
1435     }
1436     return __builtin_ffsl(val) - 2; // ffsl is 1-based and val was rounded up to the next power of two
1437 }
1438
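// Rounds val up to the smallest power of two that is >= val.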
1439 uint64_t StackPreprocess::PowCeil(uint64_t val)
1440 {
1441 size_t msbIndex = LgFloor(val - 1);
1442 return 1ULL << (msbIndex + 1);
1443 }
1444
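// Mirrors jemalloc's size-class rounding using the LG_QUANTUM/SC_* constants above, so that
// recorded allocation sizes match what the allocator actually hands out. With these constants
// the size classes are 8, 16, 32, 48, 64, 80, ...; for example, ComputeAlign(100) returns 112.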
1445 size_t StackPreprocess::ComputeAlign(size_t size)
1446 {
1447 if (size == 0) {
1448 return 0;
1449 }
1450 unsigned index = 0;
1451 if (size <= (size_t(1) << SC_LG_TINY_MAXCLASS)) {
1452 unsigned lgTmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
1453 unsigned lgCeil = LgFloor(PowCeil(size));
1454 index = (lgCeil < lgTmin) ? 0 : lgCeil - lgTmin;
1455 } else {
1456 unsigned floor = LgFloor((size << 1) - 1);
1457 unsigned shift = (floor < SC_LG_NGROUP + LG_QUANTUM) ? 0 : floor - (SC_LG_NGROUP + LG_QUANTUM);
1458 unsigned grp = shift << SC_LG_NGROUP;
1459 unsigned lgDelta = (floor < SC_LG_NGROUP + LG_QUANTUM + 1) ? LG_QUANTUM : floor - SC_LG_NGROUP - 1;
1460 size_t deltaInverseMask = size_t(-1) << lgDelta;
1461 unsigned mod = ((((size - 1) & deltaInverseMask) >> lgDelta)) & ((size_t(1) << SC_LG_NGROUP) - 1);
1462 index = SC_NTINY + grp + mod;
1463 }
1464
1465 if (index < NTBINS) {
1466 return (size_t(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
1467 }
1468 size_t reducedIndex = index - NTBINS;
1469 size_t grpVal = reducedIndex >> LG_SIZE_CLASS_GROUP;
1470 size_t modVal = reducedIndex & ((size_t(1) << LG_SIZE_CLASS_GROUP) - 1);
1471 size_t grpSizeMask = ~((!!grpVal) - 1);
1472 size_t grpSize = ((size_t(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP - 1))) << grpVal) & grpSizeMask;
1473 size_t shiftVal = (grpVal == 0) ? 1 : grpVal;
1474 size_t lgDeltaVal = shiftVal + (LG_QUANTUM - 1);
1475 size_t modSize = (modVal + 1) << lgDeltaVal;
1476 size_t usize = grpSize + modSize;
1477 return usize;
1478 }
1479
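// Serializes hookConfig_ into a standalone "nativehook_config" plugin data packet,
// reports it immediately, and then re-arms stackData_ on a fresh write context so the
// subsequent "nativehook" records start in a new packet.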
void StackPreprocess::WriteHookConfig()
{
    const size_t configSize = hookConfig_.ByteSizeLong();
    auto buffer = std::make_unique<uint8_t[]>(configSize);
    hookConfig_.SerializeToArray(buffer.get(), configSize);
    if (writer_ != nullptr) {
        writer_->ResetPos();
        profilerPluginData_.Reset(writer_->GetCtx());
    }
    profilerPluginData_.set_name("nativehook_config");
    profilerPluginData_.set_version("1.02");
    profilerPluginData_.set_status(0);
    profilerPluginData_.set_data(buffer.get(), configSize);

    FinishReport();

    auto ctx = StartReport();
    if (ctx == nullptr) {
        PROFILER_LOG_ERROR(LOG_CORE, "%s: get RandomWriteCtx FAILED!", __func__);
        return;
    }
    stackData_ = ProtoEncoder::BatchNativeHookData(ctx);
}

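// Begins a new "nativehook" plugin data packet and returns the write context for its
// data field; callers treat a nullptr context as failure.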
RandomWriteCtx* StackPreprocess::StartReport()
{
    if (writer_ != nullptr) {
        writer_->ResetPos();
        profilerPluginData_.Reset(writer_->GetCtx());
    }
    profilerPluginData_.set_name("nativehook");
    profilerPluginData_.set_version("1.02");
    profilerPluginData_.set_status(0);
    return profilerPluginData_.startAdd_data();
}

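// Stamps the current packet with pluginDataClockId_ time, finalizes its encoding and
// hands the resulting length to writer_; returns 0 when no writer is attached.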
int32_t StackPreprocess::FinishReport()
{
    struct timespec ts;
    clock_gettime(pluginDataClockId_, &ts);
    profilerPluginData_.set_clock_id(static_cast<ProfilerPluginData_ClockId>(pluginDataClockId_));
    profilerPluginData_.set_tv_sec(ts.tv_sec);
    profilerPluginData_.set_tv_nsec(ts.tv_nsec);

    int32_t len = profilerPluginData_.Finish();
    if (writer_ == nullptr) {
        PROFILER_LOG_ERROR(LOG_CORE, "%s: the writer is nullptr!", __func__);
        return 0;
    }
    writer_->FinishReport(len);
    return len;
}

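// Copies the frame-pointer unwound native instruction pointers out of the raw record
// into callFrames_, stripping PAC bits and stopping at the first zero entry.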
void StackPreprocess::FillFpNativeIp(RawStackPtr& rawData)
{
    uint64_t* fpIp = reinterpret_cast<uint64_t *>(rawData->data);
    for (uint8_t idx = 0; idx < rawData->fpDepth; ++idx) {
        if (fpIp[idx] == 0) {
            break;
        }
        callFrames_.emplace_back(StripPac(fpIp[idx], 0));
    }
}

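// Appends the JS frames carried in rawData->jsStackData to callFrames_. In statistics
// mode, release-type events (free/munmap/unuse) are skipped. Each "symbol|filePath"
// entry is split in place; with offline symbolization enabled the frame is also
// registered in (or resolved from) the ArkTS symbol cache.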
void StackPreprocess::FillFpJsData(RawStackPtr& rawData)
{
    if (hookConfig_.statistics_interval() > 0) {
        switch (rawData->stackConext->type) {
            case FREE_MSG:
            case MUNMAP_MSG:
            case MEMORY_UNUSING_MSG:
                return;
            default:
                break;
        }
    }
    fpJsCallStacks_.clear();
    /**
     * jsStackData:
     * ts_malloc1|entry/src/main/ets/pages/Index.ets:5:5,ts_malloc2|entry/src/main/ets/pages/Index.ets:8:5
     *           |                                      |
     *           JS_SYMBOL_FILEPATH_SEP                 JS_CALL_STACK_DEPTH_SEP
     * jsCallStack:
     * ts_malloc1|entry/src/main/ets/pages/Index.ets:5:5
     *           |\
     *           | jsFilePathPos = jsSymbolFilePathSepPos + 1
     *           jsSymbolFilePathSepPos
     */
    AdvancedSplitString(rawData->jsStackData, JS_CALL_STACK_DEPTH_SEP, fpJsCallStacks_);
    for (std::string& jsCallStack : fpJsCallStacks_) {
        std::string::size_type jsSymbolFilePathSepPos = jsCallStack.find_first_of(JS_SYMBOL_FILEPATH_SEP);
        if (jsSymbolFilePathSepPos == std::string::npos) {
            PROFILER_LOG_ERROR(LOG_CORE, "%s: jsCallStack find FAILED!", __func__);
            continue;
        }
        std::string::size_type jsFilePathPos = jsSymbolFilePathSepPos + 1;
        jsCallStack[jsSymbolFilePathSepPos] = '\0'; // "ts_malloc1'\0'entry/src/main/ets/pages/Index.ets:5:5"
        CallFrame& jsCallFrame = callFrames_.emplace_back(0, 0, true);
        jsCallFrame.symbolName_ = StringViewMemoryHold::GetInstance().HoldStringView(jsCallStack.c_str());
        jsCallFrame.filePath_ = StringViewMemoryHold::GetInstance().HoldStringView(jsCallStack.c_str() + jsFilePathPos);
        if (hookConfig_.offline_symbolization()) {
            DfxSymbol symbol;
            if (!runtime_instance->ArktsGetSymbolCache(jsCallFrame, symbol)) {
                symbol.filePathId_ = runtime_instance->FillArkTsFilePath(jsCallFrame.filePath_);
                symbol.symbolName_ = jsCallFrame.symbolName_;
                symbol.module_ = jsCallFrame.filePath_;
                symbol.symbolId_ = runtime_instance->GetJsSymbolCacheSize();
                runtime_instance->FillSymbolNameId(jsCallFrame, symbol);
                runtime_instance->FillFileSet(jsCallFrame, symbol);
                jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
                runtime_instance->FillJsSymbolCache(jsCallFrame, symbol);
            }
            jsCallFrame.callFrameId_ = symbol.symbolId_;
            jsCallFrame.symbolNameId_ = symbol.symbolNameId_;
            jsCallFrame.filePathId_ = symbol.filePathId_;
            jsCallFrame.filePath_ = symbol.module_;
            jsCallFrame.symbolName_ = symbol.symbolName_;
        }
    }
}

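// Appends a synthetic "UnwindErrorDwarf" frame (id DWARF_ERROR_ID) so that callstacks
// whose DWARF unwind failed remain visible in the report instead of being dropped.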
void StackPreprocess::FillDwarfErrorStack()
{
    CallFrame& jsCallFrame = callFrames_.emplace_back(0);
    jsCallFrame.symbolName_ = "UnwindErrorDwarf";
    jsCallFrame.isJsFrame_ = true;
    jsCallFrame.needReport_ |= CALL_FRAME_REPORT;
    jsCallFrame.needReport_ |= SYMBOL_NAME_ID_REPORT;
    jsCallFrame.needReport_ |= FILE_PATH_ID_REPORT;
    jsCallFrame.callFrameId_ = DWARF_ERROR_ID;
    jsCallFrame.symbolNameId_ = DWARF_ERROR_ID;
    jsCallFrame.filePathId_ = DWARF_ERROR_ID;
    jsCallFrame.filePath_ = "no-file-path";
}

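// Emits the buffered apply/release-matched events for the current period: each raw
// record is translated into the matching alloc/free/mmap/munmap/tag event, memory-tag
// and prctl names are attached where available, and the period buffers are cleared.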
void StackPreprocess::FlushRecordApplyAndReleaseMatchData()
{
    if (applyAndReleaseMatchPeriodListData_.empty()) {
        return;
    }
    std::visit([&](auto& stackData) {
        for (const auto& rawStack : applyAndReleaseMatchPeriodListData_) {
            auto hookData = stackData.add_events();
            hookData->set_tv_sec(rawStack.ts.tv_sec);
            hookData->set_tv_nsec(rawStack.ts.tv_nsec);
            if (rawStack.type == MALLOC_MSG) {
                auto allocEvent = hookData->mutable_alloc_event();
                SetEventFrame(rawStack, allocEvent, rawStack.stackMapId);
            } else if (rawStack.type == FREE_MSG) {
                auto freeEvent = hookData->mutable_free_event();
                SetEventFrame(rawStack, freeEvent, rawStack.stackMapId);
            } else if (rawStack.type == MMAP_MSG) {
                auto mmapEvent = hookData->mutable_mmap_event();
                SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId);
            } else if (rawStack.type == MMAP_FILE_PAGE_MSG) {
                auto mmapEvent = hookData->mutable_mmap_event();
                const std::string prefix = "FilePage:";
                std::string tagName;
                if (GetMemTag(rawStack.tagId, tagName)) {
                    tagName = prefix + tagName;
                }
                SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId, tagName);
            } else if (rawStack.type == MUNMAP_MSG) {
                auto munmapEvent = hookData->mutable_munmap_event();
                SetEventFrame(rawStack, munmapEvent, rawStack.stackMapId);
            } else if (rawStack.type == PR_SET_VMA_MSG) {
                auto tagEvent = hookData->mutable_tag_event();
                const std::string prefix = "Anonymous:";
                tagEvent->set_addr(rawStack.addr);
                tagEvent->set_size(rawStack.mallocSize);
                tagEvent->set_tag(prefix + prctlPeriodTags_[rawStack.tagId]);
                tagEvent->set_pid(pid_);
            } else if (rawStack.type == MEMORY_USING_MSG) {
                auto mmapEvent = hookData->mutable_mmap_event();
                std::string tagName;
                GetMemTag(rawStack.tagId, tagName);
                SetEventFrame(rawStack, mmapEvent, rawStack.stackMapId, tagName);
            } else if (rawStack.type == MEMORY_UNUSING_MSG) {
                auto munmapEvent = hookData->mutable_munmap_event();
                SetEventFrame(rawStack, munmapEvent, rawStack.stackMapId);
            }
        }
        FlushData(stackData);
    }, stackData_);
    applyAndReleaseMatchPeriodListData_.clear();
    applyAndReleaseMatchIntervallMap_.clear();
    prctlPeriodTags_.clear();
}