1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <atomic>
17 #include <climits>
18 #include <dlfcn.h>
19 #include <fcntl.h>
20 #include <malloc.h>
21 #include <string>
22 #include <sys/time.h>
23 #include <pthread.h>
24 #include <sys/prctl.h>
25 #include <unordered_map>
26 #include <unordered_set>
27 #include "dfx_regs_get.h"
28 #include "c/executor_task.h"
29 #include "common.h"
30 #include "hook_common.h"
31 #include "hook_socket_client.h"
32 #include "musl_preinit_common.h"
33 #include "parameter.h"
34 #include "stack_writer.h"
35 #include "runtime_stack_range.h"
36 #include "get_thread_id.h"
37 #include "hook_client.h"
38 #include <sys/mman.h>
39 #include "sampling.h"
40 #include "hitrace/trace.h"
41
42 using namespace OHOS::HiviewDFX;
43 using namespace OHOS::Developtools::NativeDaemon;
44
// TLS key holding the per-thread "hook disabled" flag; 10000 is a sentinel
// meaning the key has not been created yet (see InitThreadKey()).
static pthread_key_t g_disableHookFlag = 10000;
// TLS key caching the current thread id, populated lazily by GetCurThreadId().
static pthread_key_t g_hookTid;
// TLS key counting hook calls since the thread name was last reported;
// 10000 is the same "not yet created" sentinel as above.
static pthread_key_t g_updateThreadNameCount = 10000;
// Guards one-time creation of g_hookTid.
// NOTE(review): relies on zero-initialization matching PTHREAD_ONCE_INIT — confirm on this platform.
static pthread_once_t g_onceFlag;
49 namespace {
// Total number of allocation events handled by the hooks (diagnostics / perf logging).
static std::atomic<uint64_t> g_mallocTimes = 0;

// Payload sub-types for miscellaneous messages sent to the daemon.
enum class MISC_TYPE : uint32_t {
    JS_STACK_DATA = 1,
};

#ifdef PERFORMANCE_DEBUG
static std::atomic<uint64_t> g_timeCost = 0;   // accumulated hook overhead, nanoseconds
static std::atomic<uint64_t> g_dataCounts = 0; // accumulated raw-stack bytes shipped
constexpr int PRINT_INTERVAL = 5000;           // emit a stats log line every N hooked calls
constexpr uint64_t S_TO_NS = 1000 * 1000 * 1000;
#endif

using OHOS::Developtools::NativeDaemon::buildArchType;
// Socket client used to ship records to the profiler daemon; reset on start/end.
static std::shared_ptr<HookSocketClient> g_hookClient {nullptr};
static Sampling g_sampler;
// Serializes start/stop and client replacement.
std::recursive_timed_mutex g_ClientMutex;
// Protects g_memTagMap.
std::mutex g_tagMapMutex;
std::atomic<const MallocDispatchType*> g_dispatch {nullptr};
// Re-send the thread name after this many hooked calls per thread.
constexpr int UPDATE_THEARD_NAME = 1000;
static pid_t g_hookPid = 0;
static ClientConfig g_ClientConfig = {0};
static uint32_t g_maxSize = INT_MAX;
// Maps memory tag names to the numeric ids reported to the daemon.
static std::unordered_map<std::string, uint32_t> g_memTagMap;
constexpr int PID_STR_SIZE = 4;
constexpr int STATUS_LINE_SIZE = 512;
constexpr int PID_NAMESPACE_ID = 1; // 1: pid is 1 after pid namespace used
constexpr int FD_PATH_LENGTH = 64;
constexpr int MIN_SAMPLER_INTERVAL = 1;
// Shift amounts for the murmur-style address hash (AddrHash).
constexpr int FIRST_HASH = 16;
constexpr int SECOND_HASH = 13;
// Sample intervals >= THRESHOLD switch free-matching to the address bit pool.
constexpr int THRESHOLD = 256;
// Bits per Bitpool slot.
constexpr int DIVIDE_VAL = 64;
//5: fp mode is used, response_library_mode maximum stack depth
#if defined(__aarch64__)
constexpr int RESPONSE_LIBRARY_MODE_DEPTH = 5;
constexpr int TEMP_IP = 100;
#endif
static bool g_isPidChanged = false;
static struct mallinfo2 g_miStart = {0};
// [start, end) address ranges of standard libraries, used by FilterStandardSoIp.
std::vector<std::pair<uint64_t, uint64_t>> g_filterStaLibRange;
constexpr int MAX_BITPOOL_SIZE = 1000 * 1024;
// One 64-bit slot of the sampled-address bloom-style bit pool.
struct Bitpool {
    std::atomic<uint64_t> slot;
};
Bitpool* g_addressChecker = nullptr;
96
AddrHash(uint32_t h)97 inline static uint32_t AddrHash(uint32_t h)
98 {
99 h ^= h >> FIRST_HASH;
100 h *= 0x85ebca6b;
101 h ^= h >> SECOND_HASH;
102 h *= 0xc2b2ae35;
103 h ^= h >> FIRST_HASH;
104 return h;
105 }
106
Addr2Bitpool(void * addr)107 inline void Addr2Bitpool(void* addr)
108 {
109 if (!g_addressChecker) {
110 return;
111 }
112 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL);
113 g_addressChecker[val / DIVIDE_VAL].slot |= (0x1 << (val % DIVIDE_VAL));
114 }
115
IsAddrExist(void * addr)116 inline bool IsAddrExist(void* addr)
117 {
118 if (!g_addressChecker) {
119 return true;
120 }
121 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL);
122 if (g_addressChecker[val / DIVIDE_VAL].slot.load() & (0x1 << (val % DIVIDE_VAL))) {
123 return true;
124 }
125 return false;
126 }
127
// Returns the currently-installed malloc dispatch table, or nullptr when the
// hook is inactive. Relaxed ordering: callers only need the pointer value.
const MallocDispatchType* GetDispatch()
{
    return g_dispatch.load(std::memory_order_relaxed);
}
132
// IPC setup/teardown entry points required by the hook framework. The socket
// client manages its own connection, so both are intentionally no-ops.
bool InititalizeIPC()
{
    return true;
}
void FinalizeIPC() {}
138
// Extracts the first run of decimal digits from 'buf' (e.g. a "Pid:\t1234"
// line from /proc/self/status) and returns it as an int; 0 if none found.
// At most 10 digits are taken, enough for any 32-bit pid.
int ConvertPid(char* buf)
{
    char digits[11] = {0}; /* 11: 32 bits to the maximum length of a string */
    size_t len = 0;
    for (const char* p = buf; *p != '\0'; ++p) {
        if (*p >= '0' && *p <= '9' && len < sizeof(digits) - 1) {
            digits[len++] = *p;
        } else if (len > 0) {
            break; // first digit run (or the 10-digit cap) ended
        }
    }
    return atoi(digits);
}
159
// Reads the real process id from the "Pid:" line of /proc/self/status.
// Needed instead of getpid() when running inside a pid namespace.
// Returns -1 if the file cannot be opened or no "Pid:" line is found.
pid_t GetRealPid(void)
{
    const char *path = "/proc/self/status";
    char buf[STATUS_LINE_SIZE] = {0};
    FILE *fp = fopen(path, "r");
    CHECK_NOTNULL(fp, -1, "fopen fail");
    while (!feof(fp)) {
        if (fgets(buf, STATUS_LINE_SIZE, fp) == nullptr) {
            // Read error or EOF before a "Pid:" line appeared.
            fclose(fp);
            return -1;
        }
        if (strncmp(buf, "Pid:", PID_STR_SIZE) == 0) {
            break; // buf now holds the "Pid:\t<n>" line
        }
    }
    (void)fclose(fp);
    return static_cast<pid_t>(ConvertPid(buf));
}
178 } // namespace
179
GetCurThreadId()180 pid_t inline __attribute__((always_inline)) GetCurThreadId()
181 {
182 if (pthread_getspecific(g_hookTid) == nullptr) {
183 pthread_setspecific(g_hookTid, reinterpret_cast<void *>(GetThreadId()));
184 }
185 return reinterpret_cast<long>((pthread_getspecific(g_hookTid)));
186 }
187
UpdateThreadName(std::shared_ptr<HookSocketClient> & client)188 bool inline __attribute__((always_inline)) UpdateThreadName(std::shared_ptr<HookSocketClient>& client)
189 {
190 long updateCount = reinterpret_cast<long>(pthread_getspecific(g_updateThreadNameCount));
191 bool ret = true;
192 if (updateCount == 0) {
193 StackRawData tnameData = {{{{0}}}};
194 tnameData.tid = static_cast<uint32_t>(GetCurThreadId());
195 tnameData.type = THREAD_NAME_MSG;
196 prctl(PR_GET_NAME, tnameData.name);
197 ret = client->SendStackWithPayload(&tnameData,
198 sizeof(BaseStackRawData) + strlen(tnameData.name) + 1, nullptr, 0);
199 if (!ret) {
200 return ret;
201 }
202 }
203 pthread_setspecific(g_updateThreadNameCount,
204 reinterpret_cast<void *>(updateCount == UPDATE_THEARD_NAME ? 0 : updateCount + 1));
205 return ret;
206 }
207
GetTagId(std::shared_ptr<HookSocketClient> & client,const char * tagName)208 uint32_t inline __attribute__((always_inline)) GetTagId(std::shared_ptr<HookSocketClient>& client, const char* tagName)
209 {
210 if (tagName == nullptr || strlen(tagName) > PATH_MAX) {
211 return 0;
212 }
213 uint32_t tagId = 0;
214 bool isNewTag = false;
215 std::unique_lock<std::mutex> lock(g_tagMapMutex);
216 auto it = g_memTagMap.find(tagName);
217 if (it == g_memTagMap.end()) {
218 isNewTag = true;
219 tagId = g_memTagMap.size() + 1;
220 g_memTagMap[tagName] = tagId;
221 } else {
222 tagId = it->second;
223 }
224 lock.unlock();
225 if (isNewTag) {
226 StackRawData tagData = {{{{0}}}};
227 tagData.type = MEMORY_TAG;
228 tagData.tagId = tagId;
229 strcpy_s(tagData.name, PATH_MAX + 1, tagName);
230 if (client != nullptr) {
231 client->SendStackWithPayload(&tagData, sizeof(BaseStackRawData) + strlen(tagName) + 1, nullptr, 0);
232 }
233 }
234 return tagId;
235 }
236
237 static bool IsPidChanged(void);
238
MallocHookStart(void * disableHookCallback)239 void* MallocHookStart(void* disableHookCallback)
240 {
241 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
242 g_addressChecker = new Bitpool [MAX_BITPOOL_SIZE] {{0}};
243 g_mallocTimes = 0;
244 g_hookClient.reset();
245 if (g_hookClient != nullptr) {
246 return nullptr;
247 } else {
248 g_ClientConfig.Reset();
249 g_sampler.Reset();
250 g_hookClient = std::make_shared<HookSocketClient>(g_hookPid, &g_ClientConfig, &g_sampler,
251 reinterpret_cast<void (*)()>(disableHookCallback));
252 }
253 return nullptr;
254 }
255
InitHookTidKey()256 void InitHookTidKey()
257 {
258 if (pthread_key_create(&g_hookTid, nullptr) != 0) {
259 return;
260 }
261 pthread_setspecific(g_hookTid, nullptr);
262 }
263
InitThreadKey()264 bool InitThreadKey()
265 {
266 if (g_disableHookFlag != 10000) { // 10000: initial value
267 pthread_key_delete(g_disableHookFlag);
268 }
269 if (pthread_key_create(&g_disableHookFlag, nullptr) != 0) {
270 return false;
271 }
272 pthread_setspecific(g_disableHookFlag, nullptr);
273 pthread_once(&g_onceFlag, InitHookTidKey);
274 if (g_updateThreadNameCount != 10000) { // 10000: initial value
275 pthread_key_delete(g_updateThreadNameCount);
276 }
277 if (pthread_key_create(&g_updateThreadNameCount, nullptr) != 0) {
278 return false;
279 }
280 pthread_setspecific(g_updateThreadNameCount, reinterpret_cast<void *>(0));
281 return true;
282 }
283
ohos_malloc_hook_on_start(void (* disableHookCallback)())284 bool ohos_malloc_hook_on_start(void (*disableHookCallback)())
285 {
286 pthread_t threadStart;
287 if (pthread_create(&threadStart, nullptr, MallocHookStart,
288 reinterpret_cast<void *>(disableHookCallback))) {
289 return false;
290 }
291 pthread_detach(threadStart);
292 g_hookPid = GetRealPid();
293 if (!InitThreadKey()) {
294 return false;
295 }
296 GetMainThreadRuntimeStackRange(g_filterStaLibRange);
297 constexpr int paramBufferLen = 128;
298 char paramOutBuf[paramBufferLen] = {0};
299 int ret = GetParameter("persist.hiviewdfx.profiler.mem.filter", "", paramOutBuf, paramBufferLen);
300 if (ret > 0) {
301 int min = 0;
302 int max = 0;
303 if (sscanf_s(paramOutBuf, "%d,%d", &min, &max) == 2) { // 2: two parameters.
304 g_maxSize = max > 0 ? max : INT_MAX;
305 g_ClientConfig.filterSize = min > 0 ? min : 0;
306 }
307 }
308 return true;
309 }
310
ohos_release_on_end(void *)311 void* ohos_release_on_end(void*)
312 {
313 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
314 delete [] g_addressChecker;
315 g_addressChecker = nullptr;
316 g_hookClient = nullptr;
317 g_ClientConfig.Reset();
318 return nullptr;
319 }
320
ohos_malloc_hook_on_end(void)321 bool ohos_malloc_hook_on_end(void)
322 {
323 {
324 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
325 if (g_hookClient != nullptr) {
326 if (g_hookClient->GetNmdType() == 1) {
327 g_hookClient->SendNmdInfo();
328 }
329 g_hookClient->SendEndMsg();
330 g_hookClient->Flush();
331 }
332 }
333 pthread_t threadEnd;
334 if (pthread_create(&threadEnd, nullptr, ohos_release_on_end, nullptr)) {
335 return false;
336 }
337 pthread_detach(threadEnd);
338 return true;
339 }
340
FilterStandardSoIp(uint64_t ip)341 bool FilterStandardSoIp(uint64_t ip)
342 {
343 for (auto [soBegin, soEnd_]: g_filterStaLibRange) {
344 if (ip >= soBegin && ip < soEnd_) {
345 return true;
346 }
347 }
348 return false;
349 }
350
351 #if defined(__aarch64__)
// Frame-pointer stack unwinder (aarch64 only). Walks the FP chain starting at
// this frame, storing return addresses into 'ip' (up to maxDepth). Every step
// is bounds-checked against [startPtr, endPtr) and monotonicity so a corrupt
// chain cannot crash the hook. In responseLibraryMode only the first frame
// outside the standard libraries (or the RESPONSE_LIBRARY_MODE_DEPTH-th
// frame) is kept, and depth 1 is returned.
static int inline __attribute__((always_inline)) FpUnwind(int maxDepth, uint64_t* ip, int stackSize,
    const char* startPtr, const char* endPtr)
{
    void** startfp = (void**)__builtin_frame_address(0);
    void** fp = startfp;
    int depth = 0;
    int count = 0;
    uint64_t tempIp = 0;
    while (depth < maxDepth) {
        // Frame pointer must stay inside the known stack bounds.
        if (fp < (void**)startPtr || (fp + 1) >= (void**)endPtr) {
            break;
        }
        void** nextFp = (void**)*fp;
        // Stacks grow down, so the saved FP must be strictly above the current one.
        if (nextFp <= fp) {
            break;
        }
        // Total walked distance must not exceed the stack size.
        if (((nextFp - startfp) * sizeof(void*)) > static_cast<unsigned long>(stackSize)) {
            break;
        }
        fp = nextFp;
        // AAPCS64 frame record: [fp] = previous fp, [fp + 8] = saved LR.
        tempIp = *(reinterpret_cast<unsigned long*>(fp + 1));
        // Tiny values cannot be valid code addresses — treat as end of chain.
        if (tempIp <= TEMP_IP) {
            break;
        }
        if (g_ClientConfig.responseLibraryMode) {
            // Keep walking only while inside standard libraries, up to a fixed depth.
            if (++count >= RESPONSE_LIBRARY_MODE_DEPTH || !FilterStandardSoIp(tempIp)) {
                break;
            }
        } else {
            ip[depth++] = tempIp;
        }
    }
    if (g_ClientConfig.responseLibraryMode) {
        // Report a single representative frame: the last address examined.
        ip[0] = tempIp;
        depth = 1;
    }
    return depth;
}
390
getJsChainId()391 uint64_t getJsChainId()
392 {
393 if (g_ClientConfig.arktsConfig.jsStackReport > 0) {
394 OHOS::HiviewDFX::HiTraceId hitraceId = OHOS::HiviewDFX::HiTraceChain::GetId();
395 if (hitraceId.IsValid()) {
396 return hitraceId.GetChainId();
397 }
398 }
399 return 0;
400 }
401 #endif
402
// malloc() hook: performs the real allocation via 'fn', then — when the
// profiler is active, the size passes the filter and sampling selects this
// event — captures a call stack and ships a MALLOC_MSG record to the daemon.
// Returns exactly what the real allocator returned in every path.
void* hook_malloc(void* (*fn)(size_t), size_t size)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(size);
    }
    // Skip reporting when hooking is disabled or the process has forked.
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return ret;
    }
    // Size filter configured via persist.hiviewdfx.profiler.mem.filter.
    if (!ohos_set_filter_size(size, ret)) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) { //0 not sampling
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }

    // Hold a strong reference so the client cannot be destroyed mid-report.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // FP mode: unwind in-process; no raw stack bytes are shipped
        // (stackSize is reset to 0 after unwinding).
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            // Running on an ffrt coroutine stack — use its bounds.
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0;
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        // Register mode: record registers and ship the raw stack so the
        // daemon can unwind offline.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
    }
    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = ret;
    // Under coarse sampling, remember the address so hook_free() can tell
    // whether this allocation was reported.
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        Addr2Bitpool(ret);
    }
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    g_dataCounts += stackSize;
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
507
// aligned_alloc() hook. Mirrors hook_malloc(): performs the real allocation,
// then reports a MALLOC_MSG record of 'len' bytes (alignment itself is not
// recorded) with a captured stack, subject to the same filter and sampling.
void* hook_aligned_alloc(void* (*fn)(size_t, size_t), size_t align, size_t len)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(align, len);
    }
    // Skip reporting when hooking is disabled or the process has forked.
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return ret;
    }
    if (!ohos_set_filter_size(len, ret)) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(len) == 0) { //0 not sampling
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %"
                PRIu64"\n", g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(),
                g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }

    // Hold a strong reference so the client cannot be destroyed mid-report.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // FP mode: unwind in-process; no raw stack bytes are shipped.
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0;
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        // Register mode: ship registers plus the raw stack for offline unwinding.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
    }
    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = len;
    rawdata.addr = ret;
    // Under coarse sampling, remember the address for free-matching.
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        Addr2Bitpool(ret);
    }
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    g_dataCounts += stackSize;
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %"
            PRIu64"\n", g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(),
            g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
614
// valloc() hook: pure pass-through — valloc allocations are not reported to
// the profiler. Returns nullptr when no real allocator is provided.
void* hook_valloc(void* (*fn)(size_t), size_t size)
{
    return fn ? fn(size) : nullptr;
}
623
// calloc() hook: performs the real allocation, then reports a MALLOC_MSG of
// number * size bytes with a captured stack, subject to the same filter and
// sampling as hook_malloc(). Unlike hook_malloc(), the client reference is
// taken only at send time and the thread name is not refreshed here.
// NOTE(review): number * size may wrap for huge arguments — the real calloc
// rejects that case, but the reported mallocSize would wrap too; confirm.
void* hook_calloc(void* (*fn)(size_t, size_t), size_t number, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(number, size);
    }
    // Skip reporting when hooking is disabled or the process has forked.
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return pRet;
    }
    if (!ohos_set_filter_size(number * size, pRet)) {
        return pRet;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size * number) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return pRet;
    }
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // FP mode: unwind in-process; no raw stack bytes are shipped.
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0;
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        // Register mode: ship registers plus the raw stack for offline unwinding.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
    }

    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = number * size;
    rawdata.addr = pRet;
    // Under coarse sampling, remember the address for free-matching.
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        Addr2Bitpool(pRet);
    }
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder != nullptr) {
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return pRet;
}
723
// memalign() hook: pure pass-through — memalign allocations are not reported
// to the profiler. Returns nullptr when no real allocator is provided.
void* hook_memalign(void* (*fn)(size_t, size_t), size_t align, size_t bytes)
{
    return fn ? fn(align, bytes) : nullptr;
}
732
// realloc() hook: performs the real realloc, then reports the transition as a
// FREE_MSG for the old pointer followed by a MALLOC_MSG (with stack) for the
// new block, subject to the usual filter and sampling.
void* hook_realloc(void* (*fn)(void*, size_t), void* ptr, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(ptr, size);
    }
    // Skip reporting when hooking is disabled or the process has forked.
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return pRet;
    }
    if (!ohos_set_filter_size(size, pRet)) {
        return pRet;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return pRet;
    }
    StackRawData rawdata = {{{{0}}}};   // MALLOC_MSG for the new block
    StackRawData freeData = {{{{0}}}};  // FREE_MSG for the old pointer
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // FP mode: unwind in-process; no raw stack bytes are shipped.
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0;
        if (g_ClientConfig.freeStackData) {
            // NOTE(review): memcpy_s sizes here are element counts
            // (sizeof / sizeof(uint64_t)), while memcpy_s takes byte counts —
            // this copies only a fraction of rawdata.ip; verify intent.
            (void)memcpy_s(freeData.ip, sizeof(freeData.ip) / sizeof(uint64_t),
                rawdata.ip, sizeof(rawdata.ip) / sizeof(uint64_t));
        }
#endif
    } else {
        // Register mode: ship registers plus the raw stack for offline unwinding.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
        if (g_ClientConfig.freeStackData) {
            (void)memcpy_s(freeData.regs, sizeof(freeData.regs) / sizeof(char),
                rawdata.regs, sizeof(rawdata.regs) / sizeof(char));
        }
    }

    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = pRet;
    // Under coarse sampling, remember the new address for free-matching.
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        Addr2Bitpool(pRet);
    }
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder != nullptr) {
        int realSize = 0;
        int freeRealSize = 0;
        freeData.type = FREE_MSG;
        freeData.pid = rawdata.pid;
        freeData.tid = rawdata.tid;
        freeData.mallocSize = 0;
        freeData.addr = ptr;
        freeData.ts = rawdata.ts;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
            freeRealSize = sizeof(BaseStackRawData);
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
            freeRealSize = realSize;
        }
        holder->SendStackWithPayload(&freeData, freeRealSize, nullptr, 0); // 0: Don't unwind the freeData
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return pRet;
}
850
// malloc_usable_size() hook: pure pass-through, nothing is reported.
// Returns 0 when no real implementation is provided.
size_t hook_malloc_usable_size(size_t (*fn)(void*), void* ptr)
{
    return fn ? fn(ptr) : 0;
}
860
hook_free(void (* free_func)(void *),void * p)861 void hook_free(void (*free_func)(void*), void* p)
862 {
863 if (g_ClientConfig.statisticsInterval > 0) {
864 if (!free_func) {
865 return;
866 }
867 if (g_ClientConfig.mallocDisable || IsPidChanged()) {
868 free_func(p);
869 return;
870 }
871 if (g_ClientConfig.sampleInterval >= THRESHOLD) {
872 if (!IsAddrExist(p)) {
873 free_func(p);
874 return;
875 }
876 }
877 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
878 auto holder = weakClient.lock();
879 if ((holder != nullptr) && p) {
880 holder->SendStackWithPayload(&p, sizeof(uint64_t), nullptr, 0);
881 }
882 free_func(p);
883 #ifdef PERFORMANCE_DEBUG
884 g_mallocTimes++;
885 struct timespec end = {};
886 clock_gettime(CLOCK_REALTIME, &end);
887 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
888 if (g_mallocTimes % PRINT_INTERVAL == 0) {
889 PROFILER_LOG_ERROR(LOG_CORE,
890 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
891 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
892 }
893 #endif
894 return;
895 }
896 struct timespec freeTime = {};
897 clock_gettime(g_ClientConfig.clockId, &freeTime);
898 if (free_func) {
899 free_func(p);
900 }
901 if (g_ClientConfig.mallocDisable || IsPidChanged()) {
902 return;
903 }
904 if (g_ClientConfig.sampleInterval >= THRESHOLD) {
905 if (!IsAddrExist(p)) {
906 return;
907 }
908 }
909 #ifdef PERFORMANCE_DEBUG
910 struct timespec start = {};
911 clock_gettime(CLOCK_REALTIME, &start);
912 #endif
913
914 StackRawData rawdata = {{{{0}}}};
915 const char* stackptr = nullptr;
916 const char* stackendptr = nullptr;
917 int stackSize = 0;
918 int fpStackDepth = 0;
919 rawdata.ts = freeTime;
920
921 if (g_ClientConfig.freeStackData) {
922 if (g_ClientConfig.fpunwind) {
923 #ifdef __aarch64__
924 void* stackAddr = nullptr;
925 size_t coroutineStackSize = 0;
926 if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
927 stackSize = static_cast<int>(coroutineStackSize);
928 stackptr = reinterpret_cast<const char*>(stackAddr);
929 stackendptr = stackptr + coroutineStackSize;
930 } else {
931 stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
932 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
933 stackSize = stackendptr - stackptr;
934 }
935 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
936 stackSize = 0;
937 rawdata.jsChainId = getJsChainId();
938 #endif
939 } else {
940 unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
941 GetLocalRegs(regs);
942 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
943 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
944 stackSize = stackendptr - stackptr;
945 if (stackendptr == nullptr) {
946 stackSize = 0;
947 }
948 }
949 }
950
951 rawdata.type = FREE_MSG;
952 rawdata.pid = static_cast<uint32_t>(g_hookPid);
953 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
954 rawdata.mallocSize = 0;
955 rawdata.addr = p;
956 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
957 auto holder = weakClient.lock();
958 if (holder != nullptr) {
959 int realSize = 0;
960 if (g_ClientConfig.fpunwind) {
961 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
962 } else {
963 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
964 }
965 holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
966 }
967 #ifdef PERFORMANCE_DEBUG
968 g_mallocTimes++;
969 struct timespec end = {};
970 clock_gettime(CLOCK_REALTIME, &end);
971 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
972 if (g_mallocTimes % PRINT_INTERVAL == 0) {
973 PROFILER_LOG_ERROR(LOG_CORE,
974 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
975 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
976 }
977 #endif
978 }
979
SendMmapFileRawData(int prot,int flags,off_t offset,const std::string & filePath,const StackRawData & rawdata,std::shared_ptr<HookSocketClient> & holder)980 inline void SendMmapFileRawData(int prot, int flags, off_t offset, const std::string& filePath,
981 const StackRawData& rawdata, std::shared_ptr<HookSocketClient>& holder)
982 {
983 StackRawData curRawdata = {{{{0}}}};
984 curRawdata.addr = rawdata.addr;
985 curRawdata.pid = static_cast<uint32_t>(g_hookPid);
986 curRawdata.mallocSize = rawdata.mallocSize;
987 curRawdata.mmapArgs.offset = offset;
988 curRawdata.type = OHOS::Developtools::NativeDaemon::MMAP_FILE_TYPE;
989 if (prot & PROT_EXEC) {
990 curRawdata.mmapArgs.flags |= PROT_EXEC;
991 }
992 size_t len = strlen(filePath.c_str()) + 1;
993 if (strncpy_s(curRawdata.name, PATH_MAX + 1, filePath.c_str(), len) != EOK) {
994 return;
995 }
996 if (flags & MAP_FIXED) {
997 curRawdata.mmapArgs.flags |= MAP_FIXED;
998 }
999 holder->SendStackWithPayload(&curRawdata, sizeof(BaseStackRawData) + len, nullptr, 0);
1000 }
1001
// Hook wrapper for mmap(): forwards to the real mmap via 'fn', then reports
// the new mapping (with an unwound call stack) to the profiler daemon.
// For file-backed mappings it additionally resolves /proc/self/fd/<fd> to the
// file path and sends a separate MMAP_FILE_TYPE record.
// Returns whatever the real mmap returned; reporting failures never affect it.
void* hook_mmap(void*(*fn)(void*, size_t, int, int, int, off_t),
    void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(addr, length, prot, flags, fd, offset);
    }
    // Skip reporting when mmap tracking is off or the process forked/changed pid.
    if (g_ClientConfig.mmapDisable || IsPidChanged()) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    // Anonymous mappings (fd < 0, offset 0) are subject to size sampling:
    // StartSampling() == 0 means this one is not sampled, so bail out early.
    if ((fd < 0 && offset == 0) && g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL
        && g_sampler.StartSampling(length) == 0) {
#ifdef PERFORMANCE_DEBUG
        // Debug-only bookkeeping: accumulate per-call cost, log every PRINT_INTERVAL calls.
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // Frame-pointer unwinding in-process. When running on an FFRT
        // coroutine stack, unwind within the coroutine's bounds; otherwise
        // use the current thread stack.
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0; // fp mode sends the unwound ip array, not raw stack bytes
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        // DWARF mode: snapshot the registers and ship the raw stack bytes so
        // the daemon can unwind offline.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
    }

    rawdata.type = MMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = ret;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (fd >= 0) {
        // File-backed mapping: resolve the fd to its path via /proc/self/fd
        // and tag the record with the file's basename.
        rawdata.type = MMAP_FILE_PAGE_MSG;
        char path[FD_PATH_LENGTH] = {0};
        char fileName[PATH_MAX + 1] = {0};
        (void)snprintf_s(path, FD_PATH_LENGTH, FD_PATH_LENGTH - 1, "/proc/self/fd/%d", fd);
        ssize_t len = readlink(path, fileName, sizeof(fileName) - 1);
        if (len != -1) {
            fileName[len] = '\0';
            SendMmapFileRawData(prot, flags, offset, fileName, rawdata, holder);
            char* p = strrchr(fileName, '/');
            if (p != nullptr) {
                // &fileName[p - fileName + 1] is simply the character after the
                // last '/', i.e. the basename (equivalent to p + 1).
                rawdata.tagId = GetTagId(holder, &fileName[p - fileName + 1]);
            } else {
                rawdata.tagId = GetTagId(holder, fileName);
            }
        }
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    // Header size depends on unwind mode: fp mode sends fpStackDepth ips,
    // dwarf mode sends the full register snapshot.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
#ifdef PERFORMANCE_DEBUG
    // Debug-only bookkeeping: accumulate per-call cost, log every PRINT_INTERVAL calls.
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
1118
// Hook wrapper for munmap(): forwards to the real munmap via 'fn', then
// reports the unmapping (optionally with an unwound call stack, controlled by
// munmapStackData) to the profiler daemon. Returns the real munmap's result.
int hook_munmap(int(*fn)(void*, size_t), void* addr, size_t length)
{
    int ret = -1;
    // Timestamp is captured before the real munmap runs so the event time
    // reflects when the unmap was requested.
    struct timespec unmapTime = {};
    clock_gettime(g_ClientConfig.clockId, &unmapTime);
    if (fn) {
        ret = fn(addr, length);
    }
    if (g_ClientConfig.mmapDisable || IsPidChanged()) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    int stackSize = 0;
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int fpStackDepth = 0;
    rawdata.ts = unmapTime;
    if (g_ClientConfig.munmapStackData) {
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            // Frame-pointer unwinding: prefer the FFRT coroutine stack bounds
            // when running on a coroutine, else the thread stack.
            void* stackAddr = nullptr;
            size_t coroutineStackSize = 0;
            if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
                stackSize = static_cast<int>(coroutineStackSize);
                stackptr = reinterpret_cast<const char*>(stackAddr);
                stackendptr = stackptr + coroutineStackSize;
            } else {
                stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
                GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
                stackSize = stackendptr - stackptr;
            }
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
            stackSize = 0; // fp mode sends the unwound ip array, not raw stack bytes
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            // DWARF mode: snapshot registers and ship raw stack bytes for
            // offline unwinding.
            unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
            GetLocalRegs(regs);
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            // NOTE(review): stackSize is computed before the nullptr check; if
            // stackendptr can be null here the subtraction precedes the guard —
            // confirm GetRuntimeStackEnd's contract.
            stackSize = stackendptr - stackptr;
            if (stackendptr == nullptr) {
                stackSize = 0;
            }
        }
    }

    rawdata.type = MUNMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = addr;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder != nullptr) {
        // Header size depends on unwind mode: fp mode sends fpStackDepth ips,
        // dwarf mode sends the full register snapshot.
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    // Debug-only bookkeeping: accumulate per-call cost, log every PRINT_INTERVAL calls.
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
1201
hook_prctl(int (* fn)(int,...),int option,unsigned long arg2,unsigned long arg3,unsigned long arg4,unsigned long arg5)1202 int hook_prctl(int(*fn)(int, ...),
1203 int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
1204 {
1205 int ret = -1;
1206 if (fn) {
1207 ret = fn(option, arg2, arg3, arg4, arg5);
1208 }
1209 if (reinterpret_cast<char*>(arg5) == nullptr || IsPidChanged()) {
1210 return ret;
1211 }
1212 if (option == PR_SET_VMA && arg2 == PR_SET_VMA_ANON_NAME) {
1213 #ifdef PERFORMANCE_DEBUG
1214 struct timespec start = {};
1215 clock_gettime(CLOCK_REALTIME, &start);
1216 #endif
1217 StackRawData rawdata = {{{{0}}}};
1218 clock_gettime(g_ClientConfig.clockId, &rawdata.ts);
1219 rawdata.type = PR_SET_VMA_MSG;
1220 rawdata.pid = static_cast<uint32_t>(g_hookPid);
1221 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
1222 rawdata.mallocSize = arg4;
1223 rawdata.addr = reinterpret_cast<void*>(arg3);
1224 size_t tagLen = strlen(reinterpret_cast<char*>(arg5)) + 1;
1225 if (memcpy_s(rawdata.name, sizeof(rawdata.name), reinterpret_cast<char*>(arg5), tagLen) != EOK) {
1226 HILOG_ERROR(LOG_CORE, "memcpy_s tag failed");
1227 }
1228 rawdata.name[sizeof(rawdata.name) - 1] = '\0';
1229 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
1230 auto holder = weakClient.lock();
1231 if (holder != nullptr) {
1232 holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData) + tagLen, nullptr, 0);
1233 }
1234 #ifdef PERFORMANCE_DEBUG
1235 g_mallocTimes++;
1236 struct timespec end = {};
1237 clock_gettime(CLOCK_REALTIME, &end);
1238 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
1239 if (g_mallocTimes % PRINT_INTERVAL == 0) {
1240 PROFILER_LOG_ERROR(LOG_CORE,
1241 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
1242 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
1243 }
1244 #endif
1245 }
1246 return ret;
1247 }
1248
// Manual memory-trace entry point: reports an externally managed region
// (e.g. GPU or JS heap memory) as in-use (isUsing == true, with a call stack
// and a tag) or released (isUsing == false, no stack) to the profiler daemon.
void hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
{
    if (!g_ClientConfig.memtraceEnable || IsPidChanged()) {
        return;
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif
    int stackSize = 0;
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    // A call stack is only captured for the "using" event; the release event
    // just needs the address.
    if (isUsing) {
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            // Frame-pointer unwinding: prefer the FFRT coroutine stack bounds
            // when running on a coroutine, else the thread stack.
            void* stackAddr = nullptr;
            size_t coroutineStackSize = 0;
            if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
                stackSize = static_cast<int>(coroutineStackSize);
                stackptr = reinterpret_cast<const char*>(stackAddr);
                stackendptr = stackptr + coroutineStackSize;
            } else {
                stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
                GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
                stackSize = stackendptr - stackptr;
            }
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
            stackSize = 0; // fp mode sends the unwound ip array, not raw stack bytes
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            // DWARF mode: snapshot registers and ship raw stack bytes for
            // offline unwinding.
            unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
            GetLocalRegs(regs);
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            if (stackendptr == nullptr) {
                stackSize = 0;
            }
        }
    }
    rawdata.type = isUsing ? MEMORY_USING_MSG : MEMORY_UNUSING_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = addr;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    // The tag is only registered/resolved for the "using" event.
    rawdata.tagId = isUsing ? GetTagId(holder, tag) : 0;
    if (holder != nullptr) {
        // Header size depends on unwind mode: fp mode sends fpStackDepth ips,
        // dwarf mode sends the full register snapshot.
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    // Debug-only bookkeeping: accumulate per-call cost, log every PRINT_INTERVAL calls.
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
}
1323
// Entry point called by musl when the hook library is loaded: stores the
// original malloc dispatch table and brings up the IPC channel to the
// profiler daemon. The unused parameters are part of the musl hook ABI.
bool ohos_malloc_hook_initialize(const MallocDispatchType*malloc_dispatch, bool*, const char*)
{
    g_dispatch.store(malloc_dispatch);
    InititalizeIPC();
    return true;
}
// Entry point called by musl when the hook library is unloaded: tears down
// the IPC channel to the profiler daemon.
void ohos_malloc_hook_finalize(void)
{
    FinalizeIPC();
}
1334
ohos_malloc_hook_malloc(size_t size)1335 void* ohos_malloc_hook_malloc(size_t size)
1336 {
1337 __set_hook_flag(false);
1338 void* ret = hook_malloc(GetDispatch()->malloc, size);
1339 __set_hook_flag(true);
1340 return ret;
1341 }
1342
ohos_malloc_hook_realloc(void * ptr,size_t size)1343 void* ohos_malloc_hook_realloc(void* ptr, size_t size)
1344 {
1345 __set_hook_flag(false);
1346 void* ret = hook_realloc(GetDispatch()->realloc, ptr, size);
1347 __set_hook_flag(true);
1348 return ret;
1349 }
1350
ohos_malloc_hook_calloc(size_t number,size_t size)1351 void* ohos_malloc_hook_calloc(size_t number, size_t size)
1352 {
1353 __set_hook_flag(false);
1354 void* ret = hook_calloc(GetDispatch()->calloc, number, size);
1355 __set_hook_flag(true);
1356 return ret;
1357 }
1358
ohos_malloc_hook_valloc(size_t size)1359 void* ohos_malloc_hook_valloc(size_t size)
1360 {
1361 __set_hook_flag(false);
1362 void* ret = hook_valloc(GetDispatch()->valloc, size);
1363 __set_hook_flag(true);
1364 return ret;
1365 }
1366
// Exported free hook: clears the per-thread hook flag around the call so
// work done while recording the free does not re-enter the hooks.
void ohos_malloc_hook_free(void* p)
{
    __set_hook_flag(false);
    hook_free(GetDispatch()->free, p);
    __set_hook_flag(true);
}
1373
ohos_malloc_hook_malloc_usable_size(void * mem)1374 size_t ohos_malloc_hook_malloc_usable_size(void* mem)
1375 {
1376 __set_hook_flag(false);
1377 size_t ret = hook_malloc_usable_size(GetDispatch()->malloc_usable_size, mem);
1378 __set_hook_flag(true);
1379 return ret;
1380 }
1381
ohos_malloc_hook_get_hook_flag(void)1382 bool ohos_malloc_hook_get_hook_flag(void)
1383 {
1384 return pthread_getspecific(g_disableHookFlag) == nullptr;
1385 }
1386
ohos_malloc_hook_set_hook_flag(bool flag)1387 bool ohos_malloc_hook_set_hook_flag(bool flag)
1388 {
1389 bool oldFlag = ohos_malloc_hook_get_hook_flag();
1390 if (flag) {
1391 pthread_setspecific(g_disableHookFlag, nullptr);
1392 } else {
1393 pthread_setspecific(g_disableHookFlag, reinterpret_cast<void *>(1));
1394 }
1395 return oldFlag;
1396 }
1397
ohos_malloc_hook_mmap(void * addr,size_t length,int prot,int flags,int fd,off_t offset)1398 void* ohos_malloc_hook_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
1399 {
1400 __set_hook_flag(false);
1401 void* ret = hook_mmap(GetDispatch()->mmap, addr, length, prot, flags, fd, offset);
1402 __set_hook_flag(true);
1403 return ret;
1404 }
1405
ohos_malloc_hook_munmap(void * addr,size_t length)1406 int ohos_malloc_hook_munmap(void* addr, size_t length)
1407 {
1408 __set_hook_flag(false);
1409 int ret = hook_munmap(GetDispatch()->munmap, addr, length);
1410 __set_hook_flag(true);
1411 return ret;
1412 }
1413
// Exported memtrace entry point: clears the per-thread hook flag around the
// call so the reporting path does not re-enter the hooks.
void ohos_malloc_hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
{
    __set_hook_flag(false);
    hook_memtrace(addr, size, tag, isUsing);
    __set_hook_flag(true);
}
1420
ohos_malloc_hook_aligned_alloc(size_t align,size_t len)1421 void* ohos_malloc_hook_aligned_alloc(size_t align, size_t len)
1422 {
1423 __set_hook_flag(false);
1424 void* ret = hook_aligned_alloc(GetDispatch()->aligned_alloc, align, len);
1425 __set_hook_flag(true);
1426 return ret;
1427 }
1428
ohos_malloc_hook_prctl(int option,unsigned long arg2,unsigned long arg3,unsigned long arg4,unsigned long arg5)1429 int ohos_malloc_hook_prctl(int option, unsigned long arg2, unsigned long arg3,
1430 unsigned long arg4, unsigned long arg5)
1431 {
1432 __set_hook_flag(false);
1433 int ret = hook_prctl((GetDispatch()->prctl), option, arg2, arg3, arg4, arg5);
1434 __set_hook_flag(true);
1435 return ret;
1436 }
1437
ohos_set_filter_size(size_t size,void * ret)1438 bool ohos_set_filter_size(size_t size, void* ret)
1439 {
1440 if (g_ClientConfig.filterSize < 0 || size < static_cast<size_t>(g_ClientConfig.filterSize) || size > g_maxSize) {
1441 return false;
1442 }
1443 return true;
1444 }
1445
IsPidChanged(void)1446 static bool IsPidChanged(void)
1447 {
1448 if (g_isPidChanged) {
1449 return true;
1450 }
1451 int pid = getprocpid();
1452 // hap app after pid namespace used
1453 if (pid == PID_NAMESPACE_ID) {
1454 return false;
1455 } else {
1456 // native app & sa service
1457 g_isPidChanged = (g_hookPid != pid);
1458 }
1459 return g_isPidChanged;
1460 }
1461
ohos_malloc_hook_send_hook_misc_data(uint64_t id,const char * stackPtr,size_t stackSize,uint32_t type)1462 bool ohos_malloc_hook_send_hook_misc_data(uint64_t id, const char* stackPtr, size_t stackSize, uint32_t type)
1463 {
1464 if (type == static_cast<uint32_t>(MISC_TYPE::JS_STACK_DATA)) {
1465 StackRawData rawdata = {{{{0}}}};
1466 rawdata.jsChainId = id;
1467 rawdata.type = JS_STACK_MSG;
1468 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
1469 auto holder = weakClient.lock();
1470 if (holder != nullptr) {
1471 return holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData), stackPtr, stackSize);
1472 }
1473 }
1474 return false;
1475 }
1476
// Exposes the ArkTS-related portion of the client configuration to callers
// (returned as an opaque pointer per the hook ABI).
void* ohos_malloc_hook_get_hook_config()
{
    return &g_ClientConfig.arktsConfig;
}