1 /*
2  * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include "stack_preprocess.h"
17 
18 #include <elf.h>
19 #include <unistd.h>
20 
21 #include "common.h"
22 #include "logging.h"
23 #include "plugin_service_types.pb.h"
24 #include "dfx_elf.h"
25 #include "utilities.h"
26 #include "native_hook_result_standard.pb.h"
27 #include "native_hook_config_standard.pb.h"
28 #include "google/protobuf/text_format.h"
29 #include "trace_file_writer.h"
30 
31 
32 constexpr static uint32_t SC_LG_TINY_MIN = 3;
33 constexpr static uint32_t LG_QUANTUM = 4;
34 constexpr static uint32_t SC_NTINY = LG_QUANTUM - SC_LG_TINY_MIN;
35 constexpr static uint32_t SC_LG_TINY_MAXCLASS = (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1);
36 constexpr static uint32_t SC_LG_NGROUP = 2;
37 constexpr static uint32_t LG_SIZE_CLASS_GROUP = 2;
38 constexpr static uint32_t NTBINS = 1;
39 constexpr static uint32_t LG_TINY_MAXCLASS = 3;
40 constexpr static uint32_t MAX_BUFFER_SIZE = 10 * 1024 * 1024;
41 constexpr static uint32_t MAX_MATCH_CNT = 1000;
42 constexpr static uint32_t MAX_MATCH_INTERVAL = 2000;
43 constexpr static uint32_t LOG_PRINT_TIMES = 10000;
44 constexpr static uint32_t MAX_BATCH_CNT = 5;
45 constexpr static uint32_t RIGHT_MOVE_1 = 1;
46 constexpr static uint32_t RIGHT_MOVE_2 = 2;
47 constexpr static uint32_t RIGHT_MOVE_4 = 4;
48 constexpr static uint32_t RIGHT_MOVE_8 = 8;
49 constexpr static uint32_t RIGHT_MOVE_16 = 16;
50 constexpr static uint64_t SIZE_MASK = 0xFFFFFF0000000000;
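// Note: the constants above drive ComputeAlign()/LgFloor()/PowCeil() below and appear to
// mirror jemalloc-style size-class parameters (tiny classes, 16-byte quantum, 4 classes per
// group). SIZE_MASK is ORed into the first slot of callStack_ in statistics mode, presumably
// so the size slot cannot collide with a real user-space frame address.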
51 
52 using namespace OHOS::Developtools::NativeDaemon;
53 using namespace OHOS::HiviewDFX;
54 
55 StackPreprocess::StackPreprocess(const StackDataRepeaterPtr& dataRepeater,
56     const NativeHookConfig& hookConfig,
57     clockid_t pluginDataClockId, FILE* fpHookData,
58     bool isHookStandalone) : dataRepeater_(dataRepeater), buffer_(new (std::nothrow) uint8_t[MAX_BUFFER_SIZE]),
59                              hookConfig_(hookConfig), pluginDataClockId_(pluginDataClockId), fpHookData_(fpHookData),
60                              isHookStandaloneSerialize_(isHookStandalone)
61 {
62     runtime_instance = std::make_shared<VirtualRuntime>(hookConfig_);
63 
64     if (hookConfig_.malloc_free_matching_interval() > MAX_MATCH_INTERVAL) {
65         PROFILER_LOG_INFO(LOG_CORE, "Not support set %d", hookConfig_.malloc_free_matching_interval());
66         hookConfig_.set_malloc_free_matching_interval(MAX_MATCH_INTERVAL);
67     }
68 
69     if (hookConfig_.malloc_free_matching_cnt() > MAX_MATCH_CNT) {
70         PROFILER_LOG_INFO(LOG_CORE, "Not support set %d", hookConfig_.malloc_free_matching_cnt());
71         hookConfig_.set_malloc_free_matching_cnt(MAX_MATCH_CNT);
72     }
73     PROFILER_LOG_INFO(LOG_CORE, "malloc_free_matching_interval = %d malloc_free_matching_cnt = %d\n",
74         hookConfig_.malloc_free_matching_interval(), hookConfig_.malloc_free_matching_cnt());
75 
76     if (hookConfig_.statistics_interval() > 0) {
77         statisticsInterval_ = std::chrono::seconds(hookConfig_.statistics_interval());
78     }
79     PROFILER_LOG_INFO(LOG_CORE, "statistics_interval = %d statisticsInterval_ = %lld \n",
80         hookConfig_.statistics_interval(), statisticsInterval_.count());
81     hookDataClockId_ = COMMON::GetClockId(hookConfig_.clock());
82     PROFILER_LOG_INFO(LOG_CORE, "StackPreprocess(): pluginDataClockId = %d hookDataClockId = %d \n",
83         pluginDataClockId_, hookDataClockId_);
84     if (hookConfig_.save_file() && fpHookData_ == nullptr) {
85         PROFILER_LOG_ERROR(LOG_CORE, "If you need to save the file, please set the file_name");
86     }
87     PROFILER_LOG_INFO(LOG_CORE, "isHookStandaloneSerialize_ = %d", isHookStandaloneSerialize_);
88 #if defined(__arm__)
89     u64regs_.resize(PERF_REG_ARM_MAX);
90 #else
91     u64regs_.resize(PERF_REG_ARM64_MAX);
92 #endif
93     callFrames_.reserve(hookConfig_.max_stack_depth());
94 }
95 
96 StackPreprocess::~StackPreprocess()
97 {
98     isStopTakeData_ = true;
99     if (dataRepeater_) {
100         dataRepeater_->Close();
101     }
102     if (thread_.joinable()) {
103         thread_.join();
104     }
105     runtime_instance = nullptr;
106     fpHookData_ = nullptr;
107 
108     if (isSaService_) {
109         std::shared_ptr<TraceFileWriter> tfPtr = std::static_pointer_cast<TraceFileWriter>(writer_);
110         tfPtr->SetDurationTime();
111         tfPtr->Finish();
112     }
113 }
114 
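// Typical driving sequence (a sketch; the actual call sites live in the hook service code):
//   preprocess->SetWriter(writer);
//   preprocess->StartTakeResults();   // spawns the TakeResults() demuxer thread
//   ...                               // hook records flow in through dataRepeater_
//   preprocess->StopTakeResults();    // unblocks the repeater and joins the thread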
115 void StackPreprocess::SetWriter(const std::shared_ptr<Writer>& writer)
116 {
117     writer_ = writer;
118 }
119 
120 bool StackPreprocess::StartTakeResults()
121 {
122     CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
123 
124     std::thread demuxer(&StackPreprocess::TakeResults, this);
125     CHECK_TRUE(demuxer.get_id() != std::thread::id(), false, "demuxer thread invalid");
126 
127     thread_ = std::move(demuxer);
128     isStopTakeData_ = false;
129     return true;
130 }
131 
132 bool StackPreprocess::StopTakeResults()
133 {
134     PROFILER_LOG_INFO(LOG_CORE, "start StopTakeResults");
135     if (!dataRepeater_) {
136         isStopTakeData_ = true;
137         return true;
138     }
139     CHECK_NOTNULL(dataRepeater_, false, "data repeater null");
140     CHECK_TRUE(thread_.get_id() != std::thread::id(), false, "thread invalid");
141 
142     isStopTakeData_ = true;
143     dataRepeater_->PutRawStack(nullptr, false);
144     PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join");
145 
146     if (thread_.joinable()) {
147         thread_.join();
148     }
149     PROFILER_LOG_INFO(LOG_CORE, "StopTakeResults Wait thread join success");
150     return true;
151 }
152 
153 inline void StackPreprocess::IntervalFlushRecordStatistics(BatchNativeHookData& stackData)
154 {
155     {
156         std::lock_guard<std::mutex> guard(mtx_);
157         FlushData(stackData);
158     }
159     // report statistics at the configured interval
160     if (hookConfig_.statistics_interval() > 0) {
161         static auto lastStatisticsTime = std::chrono::steady_clock::now();
162         auto currentTime = std::chrono::steady_clock::now();
163         auto elapsedTime = std::chrono::duration_cast<std::chrono::seconds>(currentTime - lastStatisticsTime);
164         if (elapsedTime >= statisticsInterval_) {
165             lastStatisticsTime = currentTime;
166             FlushRecordStatistics();
167         }
168     }
169 }
170 
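// Handles raw records that carry no call stack: file-backed mmap records update the map cache
// used for symbolization, thread-name records are reported as a ThreadNameMap event.
// Returns true when the record was consumed here.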
171 bool StackPreprocess::HandleNoStackEvent(RawStackPtr& rawData, BatchNativeHookData& stackData)
172 {
173     if (rawData->stackConext->type == MMAP_FILE_TYPE) {
174         BaseStackRawData* mmapRawData = rawData->stackConext;
175         std::string filePath(reinterpret_cast<char *>(rawData->data));
176         COMMON::AdaptSandboxPath(filePath, rawData->stackConext->pid);
177         PROFILER_LOG_DEBUG(LOG_CORE, "MMAP_FILE_TYPE curMmapAddr=%p, MAP_FIXED=%d, "
178                     "PROT_EXEC=%d, offset=%" PRIu64 ", filePath=%s",
179                     mmapRawData->addr, mmapRawData->mmapArgs.flags & MAP_FIXED,
180                     mmapRawData->mmapArgs.flags & PROT_EXEC, mmapRawData->mmapArgs.offset, filePath.data());
181         {
182             std::lock_guard<std::mutex> guard(mtx_);
183             runtime_instance->HandleMapInfo(reinterpret_cast<uint64_t>(mmapRawData->addr),
184                 mmapRawData->mallocSize, mmapRawData->mmapArgs.flags, mmapRawData->mmapArgs.offset, filePath);
185         }
186         flushBasicData_ = true;
187     } else if (rawData->stackConext->type == THREAD_NAME_MSG) {
188         std::string threadName = reinterpret_cast<char*>(rawData->data);
189         ReportThreadNameMap(rawData->stackConext->tid, threadName, stackData);
190     } else {
191         return false;
192     }
193     return true;
194 }
195 
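// Shared-memory intake path: records are parsed in place from the share memory block instead of
// going through the repeater queue. A bare uint64_t payload is treated as a freed address and
// only updates the statistics accounting.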
196 void StackPreprocess::TakeResultsFromShmem(const std::shared_ptr<EventNotifier>& eventNotifier,
197                                            const std::shared_ptr<ShareMemoryBlock>& shareMemoryBlock)
198 {
199     eventNotifier->Take();
200     StackDataRepeater::RawStack rawStack;
201     RawStackPtr rawData(&rawStack, [](StackDataRepeater::RawStack* del) {});
202     while (!isStopTakeData_) {
203         BatchNativeHookData stackData;
204         bool ret = shareMemoryBlock->TakeData(
205             [&](const int8_t data[], uint32_t size) -> bool {
206                 if (size == sizeof(uint64_t)) {
207                     uint64_t addr = *reinterpret_cast<uint64_t *>(const_cast<int8_t *>(data));
208                     SetFreeStatisticsData(addr);
209                     return true;
210                 }
211                 CHECK_TRUE(size >= sizeof(BaseStackRawData), false, "stack data invalid!");
212                 rawData->stackConext = reinterpret_cast<BaseStackRawData *>(const_cast<int8_t *>(data));
213                 rawData->data = reinterpret_cast<uint8_t*>(const_cast<int8_t *>(data)) + sizeof(BaseStackRawData);
214                 rawData->fpDepth = (size - sizeof(BaseStackRawData)) / sizeof(uint64_t);
215                 if (isStopTakeData_) {
216                     return false;
217                 } else if (rawData->stackConext->type == MEMORY_TAG) {
218                     std::string tagName = reinterpret_cast<char*>(rawData->data);
219                     SaveMemTag(rawData->stackConext->tagId, tagName);
220                     return true;
221                 } else if (HandleNoStackEvent(rawData, stackData)) {
222                     if (rawData->stackConext->type == THREAD_NAME_MSG) {
223                         FlushData(stackData);
224                     }
225                     return true;
226                 } else if (rawData->stackConext->type == MUNMAP_MSG) {
227                     std::lock_guard<std::mutex> guard(mtx_);
228                     runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
229                 }
230                 {
231                     std::lock_guard<std::mutex> guard(mtx_);
232                     runtime_instance->UpdateThread(rawData->stackConext->pid, rawData->stackConext->tid);
233                 }
234                 SetHookData(rawData, stackData);
235                 IntervalFlushRecordStatistics(stackData);
236                 return true;
237         });
238         if (!ret) {
239             break;
240         }
241     }
242 }
243 
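// Main demuxer loop: drains batches of raw stacks from dataRepeater_, restores either the
// fp-unwind frame list or the register snapshot, unwinds via VirtualRuntime, and reports the
// result to the plugin writer or to fpHookData_ depending on the save_file settings.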
244 void StackPreprocess::TakeResults()
245 {
246     if (!dataRepeater_) {
247         return;
248     }
249 
250     size_t minStackDepth = hookConfig_.max_stack_depth() > MIN_STACK_DEPTH
251         ? MIN_STACK_DEPTH : hookConfig_.max_stack_depth();
252     if (hookConfig_.blocked()) {
253         minStackDepth = static_cast<size_t>(hookConfig_.max_stack_depth());
254     }
255     minStackDepth += FILTER_STACK_DEPTH;
256     PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, start!", gettid());
257     while (1) {
258         BatchNativeHookData stackData;
259         RawStackPtr batchRawStack[MAX_BATCH_CNT] = {nullptr};
260         auto result = dataRepeater_->TakeRawData(hookConfig_.malloc_free_matching_interval(), hookDataClockId_,
261             MAX_BATCH_CNT, batchRawStack);
262         if (!result || isStopTakeData_) {
263             break;
264         }
265         for (unsigned int i = 0; i < MAX_BATCH_CNT; i++) {
266             auto rawData = batchRawStack[i];
267             if (!rawData || isStopTakeData_) {
268                 break;
269             }
270             if (HandleNoStackEvent(rawData, stackData)) {
271                 continue;
272             } else if (rawData->stackConext->type == MUNMAP_MSG) {
273                 std::lock_guard<std::mutex> guard(mtx_);
274                 runtime_instance->RemoveMaps(reinterpret_cast<uint64_t>(rawData->stackConext->addr));
275             }
276 
277             if (!rawData->reportFlag) {
278                 ignoreCnts_++;
279                 if (ignoreCnts_ % LOG_PRINT_TIMES == 0) {
280                     PROFILER_LOG_INFO(LOG_CORE, "ignoreCnts_ = %d queue size = %zu\n",
281                                       ignoreCnts_, dataRepeater_->Size());
282                 }
283                 continue;
284             }
285             eventCnts_++;
286             if (eventCnts_ % LOG_PRINT_TIMES == 0) {
287                 PROFILER_LOG_INFO(LOG_CORE, "eventCnts_ = %d queue size = %zu\n", eventCnts_, dataRepeater_->Size());
288             }
289             callFrames_.clear();
290             if (hookConfig_.fp_unwind()) {
291 #if defined(__aarch64__)
292                 uintptr_t pacMask = 0xFFFFFF8000000000;
293 #else
294                 uintptr_t pacMask = 0;
295 #endif
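                // On aarch64 the high bits of a return address may carry pointer
                // authentication (PAC) bits; mask them off so the frames can be symbolized.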
296                 uint64_t* fpIp = reinterpret_cast<uint64_t *>(rawData->data);
297                 for (uint8_t idx = 0; idx < rawData->fpDepth ; ++idx) {
298                     if (fpIp[idx] == 0) {
299                         break;
300                     }
301                     callFrames_.emplace_back(fpIp[idx] & (~pacMask));
302                 }
303             } else {
304 #if defined(__arm__)
305                 uint32_t *regAddrArm = reinterpret_cast<uint32_t *>(rawData->data);
306                 u64regs_.assign(regAddrArm, regAddrArm + PERF_REG_ARM_MAX);
307 #else
308                 if (memcpy_s(u64regs_.data(), sizeof(uint64_t) * PERF_REG_ARM64_MAX, rawData->data,
309                     sizeof(uint64_t) * PERF_REG_ARM64_MAX) != EOK) {
310                     PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s regs failed");
311                 }
312 #endif
313             }
314 #ifdef PERFORMANCE_DEBUG
315             struct timespec start = {};
316             clock_gettime(CLOCK_REALTIME, &start);
317             size_t realFrameDepth = callFrames_.size();
318 #endif
319             size_t stackDepth = ((size_t)hookConfig_.max_stack_depth() > MAX_CALL_FRAME_UNWIND_SIZE)
320                         ? MAX_CALL_FRAME_UNWIND_SIZE
321                         : hookConfig_.max_stack_depth() + FILTER_STACK_DEPTH;
322             if (rawData->reduceStackFlag) {
323                 stackDepth = minStackDepth;
324             }
325             {
326                 std::lock_guard<std::mutex> guard(mtx_);
327                 bool ret = runtime_instance->UnwindStack(u64regs_, rawData->stackData, rawData->stackSize,
328                     rawData->stackConext->pid, rawData->stackConext->tid, callFrames_, stackDepth);
329                 if (!ret) {
330                     PROFILER_LOG_ERROR(LOG_CORE, "unwind fatal error");
331                     continue;
332                 }
333             }
334 
335             if (hookConfig_.save_file() && hookConfig_.file_name() != "" && isHookStandaloneSerialize_) {
336                 SetHookData(rawData, callFrames_, stackData);
337             } else if (hookConfig_.save_file() && hookConfig_.file_name() != "") {
338                 WriteFrames(rawData, callFrames_);
339             } else if (!hookConfig_.save_file()) {
340                 SetHookData(rawData, callFrames_, stackData);
341             }
342 #ifdef PERFORMANCE_DEBUG
343             struct timespec end = {};
344             clock_gettime(CLOCK_REALTIME, &end);
345             uint64_t curTimeCost = (end.tv_sec - start.tv_sec) * MAX_MATCH_CNT * MAX_MATCH_CNT * MAX_MATCH_CNT +
346                 (end.tv_nsec - start.tv_nsec);
347             if (curTimeCost >= LONG_TIME_THRESHOLD) {
348                 PROFILER_LOG_ERROR(LOG_CORE, "bigTimeCost %" PRIu64 " event=%d, realFrameDepth=%zu, "
349                     "callFramesDepth=%zu\n",
350                     curTimeCost, rawData->stackConext->type, realFrameDepth, callFrames_.size());
351             }
352             timeCost += curTimeCost;
353             unwindTimes++;
354             if (unwindTimes % LOG_PRINT_TIMES == 0) {
355                 PROFILER_LOG_ERROR(LOG_CORE, "unwindTimes %" PRIu64" cost time = %" PRIu64" mean cost = %" PRIu64"\n",
356                     unwindTimes.load(), timeCost.load(), timeCost.load() / unwindTimes.load());
357             }
358 #endif
359         }
360         if (hookConfig_.save_file() && hookConfig_.file_name() != "" && !isHookStandaloneSerialize_) {
361             continue;
362         }
363         IntervalFlushRecordStatistics(stackData);
364     }
365     PROFILER_LOG_INFO(LOG_CORE, "TakeResults thread %d, exit!", gettid());
366 }
367 
368 inline void StackPreprocess::ReportThreadNameMap(uint32_t tid, const std::string& tname,
369                                                  BatchNativeHookData& batchNativeHookData)
370 {
371     std::lock_guard<std::mutex> guard(mtx_);
372     auto it = threadNameMap_.find(tid);
373     if (it == threadNameMap_.end() || it->second != tname) {
374         threadNameMap_[tid] = tname;
375         auto hookData = batchNativeHookData.add_events();
376         auto* thread = hookData->mutable_thread_name_map();
377         thread->set_id(tid);
378         thread->set_name(tname);
379         thread->set_pid(pid_);
380     }
381 }
382 
383 inline void StackPreprocess::FillOfflineCallStack(std::vector<CallFrame>& callFrames, size_t idx)
384 {
385     for (; idx < callFrames.size(); ++idx) {
386         callStack_.push_back(callFrames[idx].ip_);
387     }
388 }
389 
390 inline void StackPreprocess::FillCallStack(std::vector<CallFrame>& callFrames,
391     BatchNativeHookData& batchNativeHookData, size_t idx)
392 {
393     for (; idx < callFrames.size(); ++idx) {
394         ReportFrameMap(callFrames[idx], batchNativeHookData);
395         // for call stack id
396         callStack_.push_back(callFrames[idx].callFrameId_);
397     }
398 }
399 
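// In response_library_mode only the topmost frame (callStack[0]) is used as the dedup key;
// otherwise the full call stack vector is the key. Returns 0 when no id has been assigned yet.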
400 inline uint32_t StackPreprocess::FindCallStackId(std::vector<uint64_t>& callStack)
401 {
402     if (hookConfig_.response_library_mode()) {
403         auto itStack = responseLibraryMap_.find(callStack[0]);
404         if (itStack != responseLibraryMap_.end()) {
405             return itStack->second;
406         }
407     } else {
408         auto itStack = callStackMap_.find(callStack);
409         if (itStack != callStackMap_.end()) {
410             return itStack->second;
411         }
412     }
413     return 0;
414 }
415 
416 /**
417  * @return '0' is invalid stack id, '> 0' is valid stack id
418  */
419 inline uint32_t StackPreprocess::SetCallStackMap(BatchNativeHookData& batchNativeHookData)
420 {
421     auto hookData = batchNativeHookData.add_events();
422     StackMap* stackmap = hookData->mutable_stack_map();
423     uint32_t stackId = 0;
424     if (hookConfig_.response_library_mode()) {
425         stackId = responseLibraryMap_.size() + 1;
426     } else {
427         stackId = callStackMap_.size() + 1;
428     }
429     stackmap->set_id(stackId);
430     stackmap->set_pid(pid_);
431     // offline symbolization uses ip, others use frame_map_id
432     if (hookConfig_.offline_symbolization()) {
433         for (size_t i = 0; i < callStack_.size(); i++) {
434             stackmap->add_ip(callStack_[i]);
435         }
436     } else {
437         for (size_t i = 0; i < callStack_.size(); i++) {
438             stackmap->add_frame_map_id(callStack_[i]);
439         }
440     }
441     if (hookConfig_.response_library_mode()) {
442         responseLibraryMap_[callStack_[0]] = stackId;
443     } else {
444         callStackMap_[callStack_] = stackId;
445     }
446     return stackId;
447 }
448 
449 /**
450  * @return '0' is invalid stack id, '> 0' is valid stack id
451  */
452 inline uint32_t StackPreprocess::GetCallStackId(const RawStackPtr& rawStack,
453     std::vector<CallFrame>& callFrames,
454     BatchNativeHookData& batchNativeHookData)
455 {
456     // ignore the first two frames when dwarf unwinding is used
457     size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
458     // if free_stack_report or munmap_stack_report is false, there is no need to record.
459     if ((rawStack->stackConext->type == FREE_MSG) && !hookConfig_.free_stack_report()) {
460         return 0;
461     } else if ((rawStack->stackConext->type == MUNMAP_MSG) && !hookConfig_.munmap_stack_report()) {
462         return 0;
463     }
464     callStack_.clear();
465     callStack_.reserve(callFrames.size());
466     if (!hookConfig_.offline_symbolization()) {
467         FillCallStack(callFrames, batchNativeHookData, idx);
468     } else {
469         FillOfflineCallStack(callFrames, idx);
470     }
471     // return call stack id
472     std::lock_guard<std::mutex> guard(mtx_);
473     uint32_t stackId = FindCallStackId(callStack_);
474     if (stackId > 0) {
475         return stackId;
476     } else {
477         return SetCallStackMap(batchNativeHookData);
478     }
479 }
480 
481 template <typename T>
482 void StackPreprocess::SetEventFrame(const RawStackPtr& rawStack,
483     std::vector<CallFrame>& callFrames,
484     BatchNativeHookData& batchNativeHookData,
485     T* event, uint32_t stackMapId)
486 {
487     // ignore the first two frames when dwarf unwinding is used
488     size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
489     event->set_pid(rawStack->stackConext->pid);
490     event->set_tid(rawStack->stackConext->tid);
491     event->set_addr((uint64_t)rawStack->stackConext->addr);
492 
493     if (hookConfig_.callframe_compress() && stackMapId != 0) {
494         event->set_stack_id(stackMapId);
495     } else {
496         for (; idx < callFrames.size(); ++idx) {
497             Frame* frame = event->add_frame_info();
498             SetFrameInfo(*frame, callFrames[idx]);
499         }
500     }
501 }
502 
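// Statistics-mode stack recording: slot 0 of callStack_ carries the allocation size ORed with
// SIZE_MASK, so identical stacks with different allocation sizes map to separate statistics
// records.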
503 void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack,
504     std::vector<CallFrame>& callFrames,
505     BatchNativeHookData& batchNativeHookData)
506 {
507     // ignore the first two frames when dwarf unwinding is used
508     size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
509     callStack_.clear();
510     callStack_.reserve(callFrames.size() + 1);
511     callStack_.push_back(rawStack->stackConext->mallocSize | SIZE_MASK);
512     if (!hookConfig_.offline_symbolization()) {
513         FillCallStack(callFrames, batchNativeHookData, idx);
514     } else {
515         FillOfflineCallStack(callFrames, idx);
516     }
517 
518     std::lock_guard<std::mutex> guard(mtx_);
519     // set alloc statistics data keyed by call stack id.
520     uint32_t stackId = FindCallStackId(callStack_);
521     if (stackId > 0) {
522         SetAllocStatisticsData(rawStack, stackId, true);
523     } else {
524         stackId = SetCallStackMap(batchNativeHookData);
525         SetAllocStatisticsData(rawStack, stackId);
526     }
527 }
528 
529 void StackPreprocess::SetAllocStatisticsFrame(const RawStackPtr& rawStack,
530     BatchNativeHookData& batchNativeHookData)
531 {
532     callStack_.resize(rawStack->fpDepth + 1);
533     callStack_[0] = (rawStack->stackConext->mallocSize | SIZE_MASK);
534     if (memcpy_s(callStack_.data() + 1, sizeof(uint64_t) * rawStack->fpDepth,
535                  rawStack->data, sizeof(uint64_t) * rawStack->fpDepth) != EOK) {
536         PROFILER_LOG_ERROR(LOG_CORE, "memcpy_s callStack_ failed");
537         return;
538     }
539     std::lock_guard<std::mutex> guard(mtx_);
540     // set alloc statistics data keyed by call stack id.
541     uint32_t stackId = FindCallStackId(callStack_);
542     if (stackId > 0) {
543         SetAllocStatisticsData(rawStack, stackId, true);
544     } else {
545         stackId = SetCallStackMap(batchNativeHookData);
546         SetAllocStatisticsData(rawStack, stackId);
547     }
548 }
549 
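// Statistics-only dispatch used by the shared-memory path: free-type records credit the matching
// allocation record, alloc-type records are accumulated per call stack id.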
550 void StackPreprocess::SetHookData(RawStackPtr rawStack, BatchNativeHookData& batchNativeHookData)
551 {
552     if (flushBasicData_) {
553         SetMapsInfo(rawStack->stackConext->pid);
554         flushBasicData_ = false;
555     }
556     // statistical reporting must be compressed and accurate.
557     switch (rawStack->stackConext->type) {
558         case FREE_MSG:
559         case MUNMAP_MSG:
560         case MEMORY_UNUSING_MSG: {
561             SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
562             break;
563         }
564         case MALLOC_MSG:
565             rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
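            // no break: the size-aligned malloc falls through and is accumulated like the mmap-style events below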
566         case MMAP_MSG:
567         case MMAP_FILE_PAGE_MSG:
568         case MEMORY_USING_MSG: {
569             SetAllocStatisticsFrame(rawStack, batchNativeHookData);
570             break;
571         }
572         case PR_SET_VMA_MSG: {
573             break;
574         }
575         default: {
576             PROFILER_LOG_ERROR(LOG_CORE, "statistics event type: error");
577             break;
578         }
579     }
580     return;
581 }
582 
583 void StackPreprocess::SetHookData(RawStackPtr rawStack,
584     std::vector<CallFrame>& callFrames, BatchNativeHookData& batchNativeHookData)
585 {
586     if (hookConfig_.offline_symbolization() && flushBasicData_) {
587         SetMapsInfo(rawStack->stackConext->pid);
588         flushBasicData_ = false;
589     }
590 
591     // statistical reporting must be compressed and accurate.
592     if (hookConfig_.statistics_interval() > 0) {
593         switch (rawStack->stackConext->type) {
594             case FREE_MSG:
595             case MUNMAP_MSG:
596             case MEMORY_UNUSING_MSG: {
597                 SetFreeStatisticsData((uint64_t)rawStack->stackConext->addr);
598                 break;
599             }
600             case MALLOC_MSG:
601                 rawStack->stackConext->mallocSize = ComputeAlign(rawStack->stackConext->mallocSize);
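                // no break: the size-aligned malloc falls through to the allocation-statistics cases below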
602             case MMAP_MSG:
603             case MMAP_FILE_PAGE_MSG:
604             case MEMORY_USING_MSG: {
605                 SetAllocStatisticsFrame(rawStack, callFrames, batchNativeHookData);
606                 break;
607             }
608             case PR_SET_VMA_MSG: {
609                 break;
610             }
611             default: {
612                 PROFILER_LOG_ERROR(LOG_CORE, "statistics event type:%d error", rawStack->stackConext->type);
613                 break;
614             }
615         }
616         return;
617     }
618 
619     uint32_t stackMapId = 0;
620     if (hookConfig_.callframe_compress() &&
621         !(rawStack->stackConext->type == MEMORY_TAG || rawStack->stackConext->type == PR_SET_VMA_MSG)) {
622         stackMapId = GetCallStackId(rawStack, callFrames, batchNativeHookData);
623     }
624 
625     if ((!hookConfig_.callframe_compress() || stackMapId == 0) && hookConfig_.string_compressed()) {
626         size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
627         for (; idx < callFrames.size(); ++idx) {
628             ReportSymbolNameMap(callFrames[idx], batchNativeHookData);
629             ReportFilePathMap(callFrames[idx], batchNativeHookData);
630         }
631     }
632 
633     NativeHookData* hookData = batchNativeHookData.add_events();
634     hookData->set_tv_sec(rawStack->stackConext->ts.tv_sec);
635     hookData->set_tv_nsec(rawStack->stackConext->ts.tv_nsec);
636 
637     if (rawStack->stackConext->type == MALLOC_MSG) {
638         AllocEvent* allocEvent = hookData->mutable_alloc_event();
639 #ifdef USE_JEMALLOC
640         allocEvent->set_size(static_cast<uint64_t>(ComputeAlign(rawStack->stackConext->mallocSize)));
641 #else
642         allocEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
643 #endif
644         allocEvent->set_thread_name_id(rawStack->stackConext->tid);
645         SetEventFrame(rawStack, callFrames, batchNativeHookData, allocEvent, stackMapId);
646     } else if (rawStack->stackConext->type == FREE_MSG) {
647         FreeEvent* freeEvent = hookData->mutable_free_event();
648         freeEvent->set_thread_name_id(rawStack->stackConext->tid);
649         SetEventFrame(rawStack, callFrames, batchNativeHookData, freeEvent, stackMapId);
650     } else if (rawStack->stackConext->type == MMAP_MSG) {
651         MmapEvent* mmapEvent = hookData->mutable_mmap_event();
652         mmapEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
653         mmapEvent->set_thread_name_id(rawStack->stackConext->tid);
654         SetEventFrame(rawStack, callFrames, batchNativeHookData, mmapEvent, stackMapId);
655     } else if (rawStack->stackConext->type == MMAP_FILE_PAGE_MSG) {
656         MmapEvent* mmapEvent = hookData->mutable_mmap_event();
657         mmapEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
658         mmapEvent->set_thread_name_id(rawStack->stackConext->tid);
659         const std::string prefix = "FilePage:";
660         std::string tagName;
661         if (GetMemTag(rawStack->stackConext->tagId, tagName)) {
662             mmapEvent->set_type(prefix + tagName);
663         }
664         SetEventFrame(rawStack, callFrames, batchNativeHookData, mmapEvent, stackMapId);
665     } else if (rawStack->stackConext->type == MUNMAP_MSG) {
666         MunmapEvent* munmapEvent = hookData->mutable_munmap_event();
667         munmapEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
668         munmapEvent->set_thread_name_id(rawStack->stackConext->tid);
669         SetEventFrame(rawStack, callFrames, batchNativeHookData, munmapEvent, stackMapId);
670     } else if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
671         MemTagEvent* tagEvent = hookData->mutable_tag_event();
672         const std::string prefix = "Anonymous:";
673         std::string tagName(reinterpret_cast<char*>(rawStack->data));
674         tagEvent->set_tag(prefix + tagName);
675         tagEvent->set_size(rawStack->stackConext->mallocSize);
676         tagEvent->set_addr((uint64_t)rawStack->stackConext->addr);
677         tagEvent->set_pid(pid_);
678     } else if (rawStack->stackConext->type == MEMORY_USING_MSG) {
679         MmapEvent* mmapEvent = hookData->mutable_mmap_event();
680         mmapEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
681         mmapEvent->set_thread_name_id(rawStack->stackConext->tid);
682         std::string tagName;
683         if (GetMemTag(rawStack->stackConext->tagId, tagName)) {
684             mmapEvent->set_type(tagName);
685         }
686         SetEventFrame(rawStack, callFrames, batchNativeHookData, mmapEvent, stackMapId);
687     } else if (rawStack->stackConext->type == MEMORY_UNUSING_MSG) {
688         MunmapEvent* munmapEvent = hookData->mutable_munmap_event();
689         munmapEvent->set_size(static_cast<uint64_t>(rawStack->stackConext->mallocSize));
690         munmapEvent->set_thread_name_id(rawStack->stackConext->tid);
691         SetEventFrame(rawStack, callFrames, batchNativeHookData, munmapEvent, stackMapId);
692     }
693 }
694 
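// Looks up the allocation record for a freed/unmapped address; on a hit it bumps the release
// count and size on the owning statistics record and drops the address entry.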
695 inline bool StackPreprocess::SetFreeStatisticsData(uint64_t addr)
696 {
697     // look up the allocation record by address
698     std::lock_guard<std::mutex> guard(mtex_);
699     auto addrIter = allocAddrMap_.find(addr);
700     if (addrIter != allocAddrMap_.end()) {
701         auto& record = addrIter->second.second;
702         ++record->releaseCount;
703         record->releaseSize += addrIter->second.first;
704         statisticsPeriodData_[record->callstackId] = record;
705         allocAddrMap_.erase(addr);
706         return true;
707     }
708     return false;
709 }
710 
711 inline void StackPreprocess::SetAllocStatisticsData(const RawStackPtr& rawStack, size_t stackId, bool isExists)
712 {
713     // if the record exists, it is updated; otherwise a new one is added
714     if (isExists) {
715         auto recordIter = recordStatisticsMap_.find(stackId);
716         if (recordIter != recordStatisticsMap_.end()) {
717             auto& record = recordIter->second;
718             ++record.applyCount;
719             record.applySize += rawStack->stackConext->mallocSize;
720             std::lock_guard<std::mutex> guard(mtex_);
721             allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
722                 std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
723             statisticsPeriodData_[stackId] = &recordIter->second;
724         }
725     } else {
726         RecordStatistic record;
727         record.pid = rawStack->stackConext->pid;
728         record.callstackId = stackId;
729         record.applyCount = 1;
730         record.applySize = rawStack->stackConext->mallocSize;
731         switch (rawStack->stackConext->type) {
732             case MALLOC_MSG: {
733                 record.type = RecordStatisticsEvent::MALLOC;
734                 break;
735             }
736             case MMAP_MSG: {
737                 record.type = RecordStatisticsEvent::MMAP;
738                 break;
739             }
740             case MMAP_FILE_PAGE_MSG: {
741                 record.type = RecordStatisticsEvent::FILE_PAGE_MSG;
742                 break;
743             }
744             case MEMORY_USING_MSG: {
745                 record.type = RecordStatisticsEvent::MEMORY_USING_MSG;
746                 record.tagId = rawStack->stackConext->tagId;
747                 break;
748             }
749             default: {
750                 PROFILER_LOG_ERROR(LOG_CORE, "SetAllocStatisticsData event type error");
751                 break;
752             }
753         }
754 
755         auto [recordIter, stat] = recordStatisticsMap_.emplace(stackId, record);
756         std::lock_guard<std::mutex> guard(mtex_);
757         allocAddrMap_[(uint64_t)rawStack->stackConext->addr] =
758             std::pair(rawStack->stackConext->mallocSize, &recordIter->second);
759         statisticsPeriodData_[stackId] = &recordIter->second;
760     }
761 }
762 
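// Plain-text output path used when save_file is set without standalone serialization:
// one semicolon-separated header line per event, followed by one line per call frame.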
763 void StackPreprocess::WriteFrames(RawStackPtr rawStack, const std::vector<CallFrame>& callFrames)
764 {
765     CHECK_TRUE(fpHookData_ != nullptr, NO_RETVAL, "fpHookData_ is nullptr, please check file_name(%s)",
766         hookConfig_.file_name().c_str());
767     if (rawStack->stackConext->type == PR_SET_VMA_MSG) {
768         const std::string prefix = "Anonymous:";
769         std::string tagName;
770         GetMemTag(rawStack->stackConext->tagId, tagName);
771         fprintf(fpHookData_, "prctl;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ":tag:%s\n",
772             rawStack->stackConext->pid, rawStack->stackConext->tid,
773             (int64_t)rawStack->stackConext->ts.tv_sec, rawStack->stackConext->ts.tv_nsec,
774             (uint64_t)rawStack->stackConext->addr, (prefix + tagName).c_str());
775         return;
776     }
777     std::string tag = "";
778     switch (rawStack->stackConext->type) {
779         case FREE_MSG:
780             tag = "free";
781             break;
782         case MALLOC_MSG:
783             tag = "malloc";
784             break;
785         case MMAP_MSG:
786             tag = "mmap";
787             break;
788         case MUNMAP_MSG:
789             tag = "munmap";
790             break;
791         default:
792             break;
793     }
794 
795     fprintf(fpHookData_, "%s;%u;%u;%" PRId64 ";%ld;0x%" PRIx64 ";%zu\n", tag.c_str(),
796         rawStack->stackConext->pid, rawStack->stackConext->tid, (int64_t)rawStack->stackConext->ts.tv_sec,
797         rawStack->stackConext->ts.tv_nsec, (uint64_t)rawStack->stackConext->addr, rawStack->stackConext->mallocSize);
798     size_t idx = hookConfig_.fp_unwind() ? 0 : FILTER_STACK_DEPTH;
799     for (; idx < callFrames.size(); ++idx) {
800         (void)fprintf(fpHookData_, "0x%" PRIx64 ";0x%" PRIx64 ";%s;%s;0x%" PRIx64 ";%" PRIu64 "\n",
801             callFrames[idx].ip_, callFrames[idx].sp_, std::string(callFrames[idx].symbolName_).c_str(),
802             std::string(callFrames[idx].filePath_).c_str(), callFrames[idx].offset_, callFrames[idx].symbolOffset_);
803     }
804 }
805 
806 inline void StackPreprocess::SetFrameInfo(Frame& frame, CallFrame& callFrame)
807 {
808     frame.set_ip(callFrame.ip_);
809     if (hookConfig_.offline_symbolization()) {
810         return;
811     }
812     frame.set_sp(callFrame.sp_);
813     frame.set_offset(callFrame.offset_);
814     frame.set_symbol_offset(callFrame.symbolOffset_);
815 
816     if (callFrame.symbolNameId_ != 0 && callFrame.filePathId_ != 0) {
817         frame.set_symbol_name_id(callFrame.symbolNameId_);
818         frame.set_file_path_id(callFrame.filePathId_);
819     } else {
820         frame.set_symbol_name(std::string(callFrame.symbolName_));
821         frame.set_file_path(std::string(callFrame.filePath_));
822     }
823 }
824 
825 inline void StackPreprocess::ReportSymbolNameMap(CallFrame& callFrame, BatchNativeHookData& batchNativeHookData)
826 {
827     if (callFrame.needReport_ & SYMBOL_NAME_ID_REPORT) {
828         auto hookData = batchNativeHookData.add_events();
829         SymbolMap* symbolMap = hookData->mutable_symbol_name();
830         symbolMap->set_id(callFrame.symbolNameId_);
831         symbolMap->set_name(std::string(callFrame.symbolName_));
832         symbolMap->set_pid(pid_);
833     }
834 }
835 
836 inline void StackPreprocess::ReportFilePathMap(CallFrame& callFrame, BatchNativeHookData& batchNativeHookData)
837 {
838     if (callFrame.needReport_ & FILE_PATH_ID_REPORT) {
839         auto hookData = batchNativeHookData.add_events();
840         FilePathMap* filePathMap = hookData->mutable_file_path();
841         filePathMap->set_id(callFrame.filePathId_);
842         filePathMap->set_name(std::string(callFrame.filePath_));
843         filePathMap->set_pid(pid_);
844     }
845 }
846 
847 inline void StackPreprocess::ReportFrameMap(CallFrame& callFrame, BatchNativeHookData& batchNativeHookData)
848 {
849     if (callFrame.needReport_ & CALL_FRAME_REPORT) {
850         ReportSymbolNameMap(callFrame, batchNativeHookData);
851         ReportFilePathMap(callFrame, batchNativeHookData);
852         auto hookData = batchNativeHookData.add_events();
853         FrameMap* frameMap = hookData->mutable_frame_map();
854         Frame* frame = frameMap->mutable_frame();
855         SetFrameInfo(*frame, callFrame);
856         frameMap->set_id(callFrame.callFrameId_);
857         frameMap->set_pid(pid_);
858     }
859 }
860 
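// Offline-symbolization metadata: for every newly seen mapping, emits its FilePathMap and
// SymbolTable plus a MapsInfo entry for each executable segment, then clears the pending list.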
861 void StackPreprocess::SetMapsInfo(pid_t pid)
862 {
863     std::lock_guard<std::mutex> guard(mtx_);
864     for (auto& itemSoBegin : runtime_instance->GetOfflineMaps()) {
865         auto& maps = runtime_instance->GetMapsCache();
866         auto mapsIter = maps.find(itemSoBegin);
867         if (mapsIter == maps.end()) {
868             continue;
869         }
870 
871         ElfSymbolTable symbolInfo;
872         auto& curMemMaps = mapsIter->second;
873         GetSymbols(curMemMaps->name_, symbolInfo);
874         if (symbolInfo.symEntSize == 0) {
875             continue;
876         }
877         BatchNativeHookData stackData;
878         NativeHookData* hookData = stackData.add_events();
879         FilePathMap* filepathMap = hookData->mutable_file_path();
880         filepathMap->set_id(curMemMaps->filePathId_);
881         filepathMap->set_name(curMemMaps->name_);
882         filepathMap->set_pid(pid_);
883         SetSymbolInfo(curMemMaps->filePathId_, symbolInfo, stackData);
884 
885         for (auto& map : curMemMaps->GetMaps()) {
886             if (map->prots & PROT_EXEC) {
887                 NativeHookData* nativeHookData = stackData.add_events();
888                 MapsInfo* mapSerialize = nativeHookData->mutable_maps_info();
889                 mapSerialize->set_pid(pid);
890                 mapSerialize->set_start(map->begin);
891                 mapSerialize->set_end(map->end);
892                 mapSerialize->set_offset(map->offset);
893                 mapSerialize->set_file_path_id(curMemMaps->filePathId_);
894             }
895         }
896         FlushData(stackData);
897     }
898     runtime_instance->ClearOfflineMaps();
899 }
900 
901 void StackPreprocess::SetSymbolInfo(uint32_t filePathId, ElfSymbolTable& symbolInfo,
902     BatchNativeHookData& batchNativeHookData)
903 {
904     if (symbolInfo.symEntSize == 0) {
905         PROFILER_LOG_ERROR(LOG_CORE, "SetSymbolInfo get symbolInfo failed");
906         return;
907     }
908     NativeHookData* hookData = batchNativeHookData.add_events();
909     SymbolTable* symTable = hookData->mutable_symbol_tab();
910     symTable->set_file_path_id(filePathId);
911     symTable->set_text_exec_vaddr(symbolInfo.textVaddr);
912     symTable->set_text_exec_vaddr_file_offset(symbolInfo.textOffset);
913     symTable->set_sym_entry_size(symbolInfo.symEntSize);
914     symTable->set_sym_table(symbolInfo.symTable.data(), symbolInfo.symTable.size());
915     symTable->set_str_table(symbolInfo.strTable.data(), symbolInfo.strTable.size());
916     symTable->set_pid(pid_);
917 }
918 
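// Serializes the batch into buffer_; in standalone mode the data is written to fpHookData_ as
// protobuf text format, otherwise the binary blob is handed to Flush().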
919 void StackPreprocess::FlushData(BatchNativeHookData& stackData)
920 {
921     if (stackData.events().size() > 0) {
922         size_t length = stackData.ByteSizeLong();
923         if (length < MAX_BUFFER_SIZE) {
924             stackData.SerializeToArray(buffer_.get(), length);
925             if (isHookStandaloneSerialize_) {
926                 std::string str;
927                 ForStandard::BatchNativeHookData StandardStackData;
928                 StandardStackData.ParseFromArray(buffer_.get(), length);
929                 google::protobuf::TextFormat::PrintToString(StandardStackData, &str);
930                 size_t n = fwrite(str.data(), 1, str.size(), fpHookData_);
931                 fflush(fpHookData_);
932                 PROFILER_LOG_DEBUG(LOG_CORE, "Flush Data fwrite n = %zu str.size() = %zu", n, str.size());
933             } else {
934                 Flush(buffer_.get(), length);
935             }
936         } else {
937             PROFILER_LOG_ERROR(LOG_CORE, "the data is larger than MAX_BUFFER_SIZE, flush failed");
938         }
939     }
940 }
941 
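// When running inside the SA service, the payload is wrapped in a ProfilerPluginData message
// stamped with pluginDataClockId_ before being passed to writer_.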
942 void StackPreprocess::Flush(const uint8_t* src, size_t size)
943 {
944     if (src == nullptr) {
945         PROFILER_LOG_ERROR(LOG_CORE, "Flush src is nullptr");
946         return;
947     }
948     if (isSaService_) {
949         ProfilerPluginData pluginData;
950         pluginData.set_name("nativehook");
951         pluginData.set_version("1.02");
952         pluginData.set_status(0);
953         pluginData.set_data(src, size);
954         struct timespec ts;
955         clock_gettime(pluginDataClockId_, &ts);
956         pluginData.set_clock_id(static_cast<ProfilerPluginData_ClockId>(pluginDataClockId_));
957         pluginData.set_tv_sec(ts.tv_sec);
958         pluginData.set_tv_nsec(ts.tv_nsec);
959         pluginData.SerializeToArray(buffer_.get(), pluginData.ByteSizeLong());
960         size = pluginData.ByteSizeLong();
961     }
962 
963     writer_->Write(buffer_.get(), size);
964     writer_->Flush();
965 }
966 
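// Loads the ELF symbol data needed for offline symbolization, preferring .symtab/.strtab and
// falling back to .dynsym/.dynstr.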
967 void StackPreprocess::GetSymbols(const std::string& filePath, ElfSymbolTable& symbols)
968 {
969     std::shared_ptr<DfxElf> elfPtr = std::make_shared<DfxElf>(filePath);
970     symbols.textVaddr = elfPtr->GetStartVaddr();
971     symbols.textOffset = elfPtr->GetStartOffset();
972     if (symbols.textVaddr == (std::numeric_limits<uint64_t>::max)()) {
973         PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get textVaddr failed");
974         return;
975     }
976 
977     std::string symSecName;
978     std::string strSecName;
979     ShdrInfo shdr;
980     if (elfPtr->GetSectionInfo(shdr, ".symtab")) {
981         symSecName = ".symtab";
982         strSecName = ".strtab";
983     } else if (elfPtr->GetSectionInfo(shdr, ".dynsym")) {
984         symSecName = ".dynsym";
985         strSecName = ".dynstr";
986     } else {
987         return;
988     }
989     symbols.symEntSize = shdr.entSize;
990     symbols.symTable.resize(shdr.size);
991     if (!elfPtr->GetSectionData(symbols.symTable.data(), shdr.size, symSecName)) {
992         PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get symbol section data failed");
993         return;
994     }
995     if (!elfPtr->GetSectionInfo(shdr, strSecName)) {
996         PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
997         return;
998     }
999     symbols.strTable.resize(shdr.size);
1000     if (!elfPtr->GetSectionData(symbols.strTable.data(), shdr.size, strSecName)) {
1001         PROFILER_LOG_ERROR(LOG_CORE, "GetSymbols get str section failed");
1002         return;
1003     }
1004 }
1005 
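// Emits one RecordStatisticsEvent per call stack touched in the current interval and then clears
// statisticsPeriodData_. Returns false when there is nothing to report.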
1006 bool StackPreprocess::FlushRecordStatistics()
1007 {
1008     if (statisticsPeriodData_.empty()) {
1009         return false;
1010     }
1011     struct timespec ts;
1012     clock_gettime(hookDataClockId_, &ts);
1013     BatchNativeHookData statisticsData;
1014     for (auto [addr, statistics] : statisticsPeriodData_) {
1015         NativeHookData* hookData = statisticsData.add_events();
1016         hookData->set_tv_sec(ts.tv_sec);
1017         hookData->set_tv_nsec(ts.tv_nsec);
1018         RecordStatisticsEvent* recordEvent = hookData->mutable_statistics_event();
1019         recordEvent->set_pid(statistics->pid);
1020         recordEvent->set_callstack_id(statistics->callstackId);
1021         recordEvent->set_type(statistics->type);
1022         recordEvent->set_apply_count(statistics->applyCount);
1023         recordEvent->set_release_count(statistics->releaseCount);
1024         recordEvent->set_apply_size(statistics->applySize);
1025         recordEvent->set_release_size(statistics->releaseSize);
1026 
1027         std::string tagName;
1028         if (statistics->type == RecordStatisticsEvent::MEMORY_USING_MSG && GetMemTag(statistics->tagId, tagName)) {
1029             recordEvent->set_tag_name(tagName);
1030         }
1031     }
1032     {
1033         std::lock_guard<std::mutex> guard(mtx_);
1034         FlushData(statisticsData);
1035     }
1036     statisticsPeriodData_.clear();
1037 
1038     return true;
1039 }
1040 
1041 void StackPreprocess::SaveMemTag(uint32_t tagId, const std::string& tagName)
1042 {
1043     std::string temp;
1044     bool res = memTagMap_.Find(tagId, temp);
1045     if (!res) {
1046         memTagMap_.EnsureInsert(tagId, tagName);
1047     }
1048 }
1049 
1050 bool StackPreprocess::GetMemTag(uint32_t tagId, std::string& tagName)
1051 {
1052     return memTagMap_.Find(tagId, tagName);
1053 }
1054 
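// Computes floor(log2(val)) for val > 0: the bit-smear turns val into 2^(k+1) - 1, the ++ into
// 2^(k+1), and __builtin_ffsl() - 2 recovers k (e.g. LgFloor(199) == 7).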
1055 unsigned StackPreprocess::LgFloor(unsigned long val)
1056 {
1057     val |= (val >> RIGHT_MOVE_1);
1058     val |= (val >> RIGHT_MOVE_2);
1059     val |= (val >> RIGHT_MOVE_4);
1060     val |= (val >> RIGHT_MOVE_8);
1061     val |= (val >> RIGHT_MOVE_16);
1062     if (sizeof(val) > 4) {              // 4: sizeThreshold
1063         int constant = sizeof(val) * 4; // 4: sizeThreshold
1064         val |= (val >> constant);
1065     }
1066     val++;
1067     if (val == 0) {
1068         return 8 * sizeof(val) - 1; // 8: 8byte
1069     }
1070     return __builtin_ffsl(val) - 2; // 2: adjustment
1071 }
1072 
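// Rounds val up to the next power of two, e.g. PowCeil(100) == 128 and PowCeil(64) == 64.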
1073 uint64_t StackPreprocess::PowCeil(uint64_t val)
1074 {
1075     size_t msbIndex = LgFloor(val - 1);
1076     return 1ULL << (msbIndex + 1);
1077 }
1078 
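// Rounds a requested allocation size up to its size class; the scheme appears to mirror
// jemalloc's size-class index logic (e.g. ComputeAlign(17) == 32, ComputeAlign(100) == 112
// with the 16-byte quantum configured above).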
1079 size_t StackPreprocess::ComputeAlign(size_t size)
1080 {
1081     if (size == 0) {
1082         return 0;
1083     }
1084     unsigned index = 0;
1085     if (size <= (size_t(1) << SC_LG_TINY_MAXCLASS)) {
1086         unsigned lgTmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
1087         unsigned lgCeil = LgFloor(PowCeil(size));
1088         index = (lgCeil < lgTmin) ? 0 : lgCeil - lgTmin;
1089     } else {
1090         unsigned floor = LgFloor((size << 1) - 1);
1091         unsigned shift = (floor < SC_LG_NGROUP + LG_QUANTUM) ? 0 : floor - (SC_LG_NGROUP + LG_QUANTUM);
1092         unsigned grp = shift << SC_LG_NGROUP;
1093         unsigned lgDelta = (floor < SC_LG_NGROUP + LG_QUANTUM + 1) ? LG_QUANTUM : floor - SC_LG_NGROUP - 1;
1094         size_t deltaInverseMask = size_t(-1) << lgDelta;
1095         unsigned mod = ((((size - 1) & deltaInverseMask) >> lgDelta)) & ((size_t(1) << SC_LG_NGROUP) - 1);
1096         index = SC_NTINY + grp + mod;
1097     }
1098 
1099     if (index < NTBINS) {
1100         return (size_t(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
1101     }
1102     size_t reducedIndex = index - NTBINS;
1103     size_t grpVal = reducedIndex >> LG_SIZE_CLASS_GROUP;
1104     size_t modVal = reducedIndex & ((size_t(1) << LG_SIZE_CLASS_GROUP) - 1);
1105     size_t grpSizeMask = ~((!!grpVal) - 1);
1106     size_t grpSize = ((size_t(1) << (LG_QUANTUM + (LG_SIZE_CLASS_GROUP - 1))) << grpVal) & grpSizeMask;
1107     size_t shiftVal = (grpVal == 0) ? 1 : grpVal;
1108     size_t lgDeltaVal = shiftVal + (LG_QUANTUM - 1);
1109     size_t modSize = (modVal + 1) << lgDeltaVal;
1110     size_t usize = grpSize + modSize;
1111     return usize;
1112 }
1113 
1114 void StackPreprocess::WriteHookConfig()
1115 {
1116     std::shared_ptr<TraceFileWriter> tfPtr = std::static_pointer_cast<TraceFileWriter>(writer_);
1117     hookConfig_.SerializeToArray(buffer_.get(), hookConfig_.ByteSizeLong());
1118     tfPtr->WriteStandalonePluginData(
1119         "nativehook_config",
1120         std::string(reinterpret_cast<char*>(buffer_.get()), hookConfig_.ByteSizeLong()));
1121 }