1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <atomic>
17 #include <climits>
18 #include <dlfcn.h>
19 #include <fcntl.h>
20 #include <malloc.h>
21 #include <string>
22 #include <sys/time.h>
23 #include <pthread.h>
24 #include <sys/prctl.h>
25 #include <unordered_map>
26 #include <unordered_set>
27 #include "dfx_regs_get.h"
28 #include "common.h"
29 #include "hook_common.h"
30 #include "hook_socket_client.h"
31 #include "logging.h"
32 #include "musl_preinit_common.h"
33 #include "parameter.h"
34 #include "stack_writer.h"
35 #include "runtime_stack_range.h"
36 #include "get_thread_id.h"
37 #include "hook_client.h"
38 #include <sys/mman.h>
39 #include "sampling.h"
40 #include "hitrace/trace.h"
41
using namespace OHOS::HiviewDFX;
using namespace OHOS::Developtools::NativeDaemon;

// TLS key for the per-thread "hook disabled" re-entrancy flag.
// 10000 is the sentinel meaning "key not yet created" (see InitTheadKey()).
static pthread_key_t g_disableHookFlag = 10000;
// TLS key caching the current thread id; filled lazily in GetCurThreadId().
static pthread_key_t g_hookTid;
// TLS key counting events since the thread name was last reported to the
// daemon; 10000 is the same "key not yet created" sentinel.
static pthread_key_t g_updateThreadNameCount = 10000;
// Guards one-time creation of g_hookTid via pthread_once.
static pthread_once_t g_onceFlag;
namespace {
// Total allocations reported since the hook started; also drives the
// PERFORMANCE_DEBUG print interval.
static std::atomic<uint64_t> g_mallocTimes = 0;

// Sub-types for miscellaneous payloads sent to the daemon.
enum class MISC_TYPE : uint32_t {
    JS_STACK_DATA = 1,
};

#ifdef PERFORMANCE_DEBUG
static std::atomic<uint64_t> g_timeCost = 0;   // accumulated hook overhead, ns
static std::atomic<uint64_t> g_dataCounts = 0; // accumulated copied stack bytes
constexpr int PRINT_INTERVAL = 5000;
constexpr uint64_t S_TO_NS = 1000 * 1000 * 1000;
#endif

using OHOS::Developtools::NativeDaemon::buildArchType;
// Socket connection to the profiler daemon; (re)created by MallocHookStart().
static std::shared_ptr<HookSocketClient> g_hookClient {nullptr};
static Sampling g_sampler;
std::recursive_timed_mutex g_ClientMutex;  // guards g_hookClient lifecycle
std::recursive_timed_mutex g_FilterMapMutex;  // guards g_filterStaLibRange
std::mutex g_tagMapMutex;  // guards g_memTagMap
std::mutex g_usableSizeMapMutex;  // guards g_mallocUsableSizeMap
std::atomic<const MallocDispatchType*> g_dispatch {nullptr};
// Re-send the thread name after this many events per thread.
constexpr int UPDATE_THEARD_NAME = 1000;
static std::atomic<pid_t> g_hookPid = 0;  // real pid (pid-namespace aware)
static ClientConfig g_ClientConfig = {0};
// Upper allocation-size bound parsed from the mem.filter system parameter.
static uint32_t g_maxSize = INT_MAX;
static std::unordered_map<std::string, uint32_t> g_memTagMap;
// Cache: requested malloc size -> malloc_usable_size() result.
static std::unordered_map<size_t, size_t> g_mallocUsableSizeMap;
constexpr int PID_STR_SIZE = 4;
constexpr int STATUS_LINE_SIZE = 512;
constexpr int PID_NAMESPACE_ID = 1; // 1: pid is 1 after pid namespace used
constexpr int FD_PATH_LENGTH = 64;
constexpr int MIN_SAMPLER_INTERVAL = 1;
constexpr int FIRST_HASH = 16;  // shift amounts of the 32-bit hash finalizer
constexpr int SECOND_HASH = 13;
// Sample interval at/above which the address bit pool is used for free()s.
constexpr int THRESHOLD = 256;
constexpr int DIVIDE_VAL = 64;  // bits per Bitpool slot
constexpr uintptr_t MAX_UNWIND_ADDR_RANGE = 16 * 1024;
//5: fp mode is used, response_library_mode maximum stack depth
#if defined(__aarch64__)
constexpr int RESPONSE_LIBRARY_MODE_DEPTH = 5;
constexpr int TEMP_IP = 100;
#endif
static bool g_isPidChanged = false;
static struct mallinfo2 g_miStart = {0};
// Address ranges of standard libraries (ld-musl/libc++), used to trim stacks
// in response-library mode.
std::vector<std::pair<uint64_t, uint64_t>> g_filterStaLibRange;
constexpr int MAX_BITPOOL_SIZE = 1000 * 1024;
// One 64-bit slot of the bloom-filter-style allocation-address bit pool.
struct Bitpool {
    std::atomic<uint64_t> slot;
};
// Bit pool remembering sampled allocation addresses; see Addr2Bitpool().
Bitpool* g_addressChecker = nullptr;

AddrHash(uint32_t h)101 inline static uint32_t AddrHash(uint32_t h)
102 {
103 h ^= h >> FIRST_HASH;
104 h *= 0x85ebca6b;
105 h ^= h >> SECOND_HASH;
106 h *= 0xc2b2ae35;
107 h ^= h >> FIRST_HASH;
108 return h;
109 }
110
Addr2Bitpool(void * addr)111 inline void Addr2Bitpool(void* addr)
112 {
113 if (!g_addressChecker) {
114 return;
115 }
116 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL);
117 g_addressChecker[val / DIVIDE_VAL].slot |= (0x1 << (val % DIVIDE_VAL));
118 }
119
IsAddrExist(void * addr)120 inline bool IsAddrExist(void* addr)
121 {
122 if (!g_addressChecker) {
123 return true;
124 }
125 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL);
126 if (g_addressChecker[val / DIVIDE_VAL].slot.load() & (0x1 << (val % DIVIDE_VAL))) {
127 return true;
128 }
129 return false;
130 }
131
GetDispatch()132 const MallocDispatchType* GetDispatch()
133 {
134 return g_dispatch.load(std::memory_order_relaxed);
135 }
136
// IPC setup hook required by the musl hook interface. The socket client is
// created lazily on the MallocHookStart worker thread, so nothing to do here.
bool InititalizeIPC()
{
    return true;
}

// IPC teardown hook required by the musl hook interface; intentionally empty.
void FinalizeIPC()
{
}
142
// Extracts the first run of decimal digits from `buf` (a /proc/self/status
// line such as "Pid:\t1234\n") and returns it as an int; 0 when no digits.
int ConvertPid(char* buf, size_t len)
{
    (void)len; // the scan relies on the NUL terminator instead
    char digits[11] = {0}; /* 11: 32 bits to the maximum length of a string */
    size_t pos = 0;
    for (const char* cur = buf; *cur != '\0'; ++cur) {
        const bool isDigit = (*cur >= '0') && (*cur <= '9');
        if (isDigit && pos < sizeof(digits) - 1) {
            digits[pos++] = *cur;
            continue;
        }
        if (pos > 0) {
            break; // the digit run ended (or the buffer is full)
        }
    }
    return atoi(digits);
}
164
// Reads the real process id from the "Pid:" line of /proc/self/status.
// Needed because getpid() can return a namespace-local pid (e.g. 1) when the
// process runs inside a pid namespace (see PID_NAMESPACE_ID).
// Returns -1 when the file cannot be read.
// NOTE(review): CHECK_NOTNULL presumably logs and returns -1 on a null fp —
// confirm against common.h.
pid_t GetRealPid(void)
{
    const char *path = "/proc/self/status";
    char buf[STATUS_LINE_SIZE] = {0};
    FILE *fp = fopen(path, "r");
    CHECK_NOTNULL(fp, -1, "fopen fail");
    // Scan line by line until the "Pid:" entry is found; buf keeps the last
    // line read when we fall out of the loop.
    while (fp != nullptr && !feof(fp)) {
        if (fgets(buf, STATUS_LINE_SIZE, fp) == nullptr) {
            fclose(fp);
            return -1;
        }
        if (strncmp(buf, "Pid:", PID_STR_SIZE) == 0) {
            break;
        }
    }
    (void)fclose(fp);
    // Extract the numeric part of the "Pid:\t<n>" line.
    return static_cast<pid_t>(ConvertPid(buf, sizeof(buf)));
}
183 } // namespace
184
GetCurThreadId()185 pid_t inline __attribute__((always_inline)) GetCurThreadId()
186 {
187 if (pthread_getspecific(g_hookTid) == nullptr) {
188 pthread_setspecific(g_hookTid, reinterpret_cast<void *>(GetThreadId()));
189 }
190 return reinterpret_cast<long>((pthread_getspecific(g_hookTid)));
191 }
192
// Sends the current thread's name to the daemon when the per-thread TLS
// counter is 0, i.e. on the thread's first event and then once every
// UPDATE_THEARD_NAME events, so prctl() is not paid on every allocation.
// Returns false when the send fails (caller should drop the current event).
bool inline __attribute__((always_inline)) UpdateThreadName(std::shared_ptr<HookSocketClient>& client)
{
    long updateCount = reinterpret_cast<long>(pthread_getspecific(g_updateThreadNameCount));
    bool ret = true;
    if (updateCount == 0) {
        NameData tnameData = {{{{0}}}};
        tnameData.tid = static_cast<uint32_t>(GetCurThreadId());
        tnameData.type = THREAD_NAME_MSG;
        // Fetch the current thread's name from the kernel.
        prctl(PR_GET_NAME, tnameData.name);
        ret = client->SendStackWithPayload(&tnameData,
            sizeof(BaseStackRawData) + strlen(tnameData.name) + 1, nullptr, 0);
        if (!ret) {
            return ret;
        }
    }
    // Advance the counter, wrapping to 0 so the name is refreshed periodically.
    pthread_setspecific(g_updateThreadNameCount,
        reinterpret_cast<void *>(updateCount == UPDATE_THEARD_NAME ? 0 : updateCount + 1));
    return ret;
}
212
// Maps a user-supplied memory tag name to a small process-wide numeric id,
// registering the name with the daemon the first time it is seen.
// Returns 0 for a null or over-long name (0 also means "no tag").
uint32_t inline __attribute__((always_inline)) GetTagId(std::shared_ptr<HookSocketClient>& client, const char* tagName)
{
    if (tagName == nullptr || strlen(tagName) > MAX_HOOK_PATH) {
        return 0;
    }
    uint32_t tagId = 0;
    bool isNewTag = false;
    std::unique_lock<std::mutex> lock(g_tagMapMutex);
    auto it = g_memTagMap.find(tagName);
    if (it == g_memTagMap.end()) {
        isNewTag = true;
        // Ids are 1-based and assigned in registration order.
        tagId = g_memTagMap.size() + 1;
        g_memTagMap[tagName] = tagId;
    } else {
        tagId = it->second;
    }
    lock.unlock();
    if (isNewTag) {
        // Announce the new tag name to the daemon outside the map lock.
        NameData tagData = {{{{0}}}};
        tagData.type = MEMORY_TAG;
        tagData.tagId = tagId;
        strcpy_s(tagData.name, MAX_HOOK_PATH + 1, tagName);
        if (client != nullptr) {
            client->SendStackWithPayload(&tagData, sizeof(BaseStackRawData) + strlen(tagName) + 1, nullptr, 0);
        }
    }
    return tagId;
}
241
242 static bool IsPidChanged(void);
243
/* Return value: true - this size is filtered out, no trace is recorded;
 * false - not filtered, the trace is recorded as usual. */
static bool SimplifiedFilter(void* ptr, size_t mallcoSize)
{
    // Filtering is only active once the daemon has pushed the two most common
    // allocation sizes; until then record everything.
    if (g_ClientConfig.largestSize == 0 || g_ClientConfig.secondLargestSize == 0) {
        return false;
    }

    size_t usableSize = 0;
    if (mallcoSize == 0) {
        /* hook_free: the requested size is unknown, ask the allocator */
        usableSize = malloc_usable_size(ptr);
    } else {
        // Cache requested-size -> usable-size so recurring allocation sizes
        // skip the malloc_usable_size() call.
        // NOTE(review): the cache grows without bound as new sizes appear.
        std::unique_lock<std::mutex> lock(g_usableSizeMapMutex);
        auto it = g_mallocUsableSizeMap.find(mallcoSize);
        if (it == g_mallocUsableSizeMap.end()) {
            usableSize = malloc_usable_size(ptr);
            g_mallocUsableSizeMap[mallcoSize] = usableSize;
        } else {
            usableSize = it->second;
        }
        lock.unlock();
    }

    // Allocations at or above the sampling interval are always recorded.
    if (usableSize >= g_ClientConfig.sampleInterval) {
        return false;
    }

    // The daemon-selected "interesting" sizes are always recorded.
    if ((usableSize == g_ClientConfig.largestSize) ||
        (usableSize == g_ClientConfig.secondLargestSize) ||
        (usableSize == g_ClientConfig.maxGrowthSize)) {
        return false;
    }

    return true;
}
279
MallocHookStart(void * disableHookCallback)280 void* MallocHookStart(void* disableHookCallback)
281 {
282 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
283 PROFILER_LOG_INFO(LOG_CORE, "MallocHookStart begin!");
284 g_addressChecker = new Bitpool [MAX_BITPOOL_SIZE] {{0}};
285 g_mallocTimes = 0;
286 g_hookClient.reset();
287 g_hookPid = GetRealPid();
288 ParseSelfMaps(g_filterStaLibRange);
289 if (g_hookClient != nullptr) {
290 return nullptr;
291 } else {
292 g_ClientConfig.Reset();
293 g_sampler.Reset();
294 g_hookClient = std::make_shared<HookSocketClient>(g_hookPid.load(), &g_ClientConfig, &g_sampler,
295 reinterpret_cast<void (*)()>(disableHookCallback));
296 }
297 return nullptr;
298 }
299
InitHookTidKey()300 static void InitHookTidKey()
301 {
302 if (pthread_key_create(&g_hookTid, nullptr) != 0) {
303 return;
304 }
305 pthread_setspecific(g_hookTid, nullptr);
306 }
307
// (Re)creates the TLS keys used by the hook. Keys surviving from a previous
// hook session are deleted first; 10000 is the "never created" sentinel for
// both g_disableHookFlag and g_updateThreadNameCount.
// Returns false when a key cannot be created.
static bool InitTheadKey()
{
    if (g_disableHookFlag != 10000) { // 10000: initial value
        pthread_key_delete(g_disableHookFlag);
    }
    if (pthread_key_create(&g_disableHookFlag, nullptr) != 0) {
        return false;
    }
    pthread_setspecific(g_disableHookFlag, nullptr);
    // The thread-id key is created exactly once per process.
    pthread_once(&g_onceFlag, InitHookTidKey);
    if (g_updateThreadNameCount != 10000) { // 10000: initial value
        pthread_key_delete(g_updateThreadNameCount);
    }
    if (pthread_key_create(&g_updateThreadNameCount, nullptr) != 0) {
        return false;
    }
    // Counter 0 forces the thread name to be sent on the next event.
    pthread_setspecific(g_updateThreadNameCount, reinterpret_cast<void *>(0));
    return true;
}
327
// Entry point invoked when the malloc hook is switched on.
// Spawns a detached worker thread to connect to the profiler daemon (socket
// setup must not run on the caller's thread), initializes the TLS keys, and
// reads the optional "min,max" allocation-size filter system parameter.
// Returns false when thread creation or key setup fails.
bool ohos_malloc_hook_on_start(void (*disableHookCallback)())
{
    pthread_t threadStart;
    if (pthread_create(&threadStart, nullptr, MallocHookStart,
        reinterpret_cast<void *>(disableHookCallback))) {
        return false;
    }
    pthread_detach(threadStart);
    if (!InitTheadKey()) {
        return false;
    }
    constexpr int paramBufferLen = 128;
    char paramOutBuf[paramBufferLen] = {0};
    // Optional "min,max" size window: only allocations inside it are traced.
    int ret = GetParameter("persist.hiviewdfx.profiler.mem.filter", "", paramOutBuf, paramBufferLen);
    if (ret > 0) {
        int min = 0;
        int max = 0;
        if (sscanf_s(paramOutBuf, "%d,%d", &min, &max) == 2) { // 2: two parameters.
            g_maxSize = max > 0 ? static_cast<uint32_t>(max) : INT_MAX;
            g_ClientConfig.filterSize = min > 0 ? min : 0;
        }
    }
    return true;
}
352
ohos_release_on_end(void *)353 void* ohos_release_on_end(void*)
354 {
355 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
356 PROFILER_LOG_INFO(LOG_CORE, "ohos_release_on_end begin!");
357 delete [] g_addressChecker;
358 g_addressChecker = nullptr;
359 g_hookClient = nullptr;
360 g_ClientConfig.Reset();
361 return nullptr;
362 }
363
// Entry point invoked when the malloc hook is switched off.
// Synchronously flushes pending data and the end marker to the daemon, then
// performs the actual teardown on a detached worker thread (teardown takes
// g_ClientMutex and must not block the caller).
// Returns false when the teardown thread cannot be created.
bool ohos_malloc_hook_on_end(void)
{
    {
        std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
        if (g_hookClient != nullptr) {
            // 1: native memory detail (nmd) info was requested -- TODO confirm
            if (g_hookClient->GetNmdType() == 1) {
                g_hookClient->SendNmdInfo();
            }
            g_hookClient->SendEndMsg();
            g_hookClient->Flush();
        }
    }
    pthread_t threadEnd;
    if (pthread_create(&threadEnd, nullptr, ohos_release_on_end, nullptr)) {
        return false;
    }
    pthread_detach(threadEnd);
    return true;
}
383
FilterStandardSoIp(uint64_t ip)384 bool FilterStandardSoIp(uint64_t ip)
385 {
386 std::lock_guard<std::recursive_timed_mutex> guard(g_FilterMapMutex);
387 for (auto [soBegin, soEnd_]: g_filterStaLibRange) {
388 if (ip >= soBegin && ip < soEnd_) {
389 return true;
390 }
391 }
392 return false;
393 }
394
395 #if defined(__aarch64__)
// Frame-pointer-based stack unwinder (arm64 only).
// Walks the FP chain starting at this function's own frame, writing return
// addresses into `ips` (each adjusted by -4 to point at the call site).
// In response-library mode the loop instead skips up to
// RESPONSE_LIBRARY_MODE_DEPTH standard-library frames and reports only the
// first frame outside them.
// Returns the number of entries written to `ips`.
static int inline __attribute__((always_inline)) FpUnwind(int maxDepth, uint64_t* ips)
{
    uintptr_t stackBottom = 0;
    uintptr_t stackTop = 0;
    uintptr_t stackPtr = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
    int depth = 0;
    if (!GetRuntimeStackRange(stackPtr, stackBottom, stackTop, g_hookPid.load() == GetCurThreadId())) {
        return depth;
    }

    uintptr_t startFp = stackPtr;
    uintptr_t nextFp = *reinterpret_cast<uintptr_t*>(startFp);
    // The FP chain must grow towards higher addresses; bail out otherwise.
    if (nextFp <= stackPtr) {
        return depth;
    }
    uintptr_t fp = nextFp; // skip current frame

    int count = 0;
    uint64_t ip = 0;
    // Also stop once the walk strays MAX_UNWIND_ADDR_RANGE past the start.
    while (depth < maxDepth && (fp - startFp < MAX_UNWIND_ADDR_RANGE)) {
        // Stop when the frame pointer leaves the thread's stack.
        if (fp < stackBottom || fp >= stackTop - sizeof(uintptr_t)) {
            break;
        }
        // Saved return address lives one word above the saved FP.
        ip = *reinterpret_cast<uintptr_t*>(fp + sizeof(uintptr_t));
        if (g_ClientConfig.responseLibraryMode) {
            // Keep walking past standard-library frames, up to the mode's
            // depth limit; the surviving ip is reported after the loop.
            if (++count >= RESPONSE_LIBRARY_MODE_DEPTH || !FilterStandardSoIp(ip)) {
                break;
            }
        } else {
            ips[depth++] = ip > 0x4 ? ip - 0x4 : ip; // adjust pc in Arm64 architecture
        }

        nextFp = *reinterpret_cast<uintptr_t*>(fp);
        if (nextFp <= stackPtr) {
            break;
        }
        // Self-referencing frame would loop forever: drop it and stop.
        if (fp == nextFp) {
            depth -= 1;
            break;
        }
        fp = nextFp;
    }
    if (g_ClientConfig.responseLibraryMode) {
        // Report exactly one frame: the last ip examined above.
        ips[0] = ip > 0x4 ? ip - 0x4 : ip;
        depth = 1;
    }
    return depth;
}
444
// Returns the current HiTrace chain id when JS stack reporting is enabled, so
// native allocations can be correlated with JS stacks; 0 when disabled or no
// valid trace id exists.
uint64_t getJsChainId()
{
    if (g_ClientConfig.arktsConfig.jsStackReport > 0) {
        OHOS::HiviewDFX::HiTraceId hitraceId = OHOS::HiviewDFX::HiTraceChain::GetId();
        if (hitraceId.IsValid()) {
            return hitraceId.GetChainId();
        }
    }
    return 0;
}
455 #endif
456
// Captures the caller's registers into rawdata.regs and computes how many
// bytes of stack lie between the captured SP and the stack top — the span the
// daemon copies for server-side (DWARF) unwinding.
// On success `stackPtr` receives the captured SP; returns 0 when the stack
// range cannot be determined.
static int inline __attribute__((always_inline)) GetStackSize(uintptr_t& stackPtr, StackRawData& rawdata)
{
    uintptr_t* regs = reinterpret_cast<uintptr_t*>(&(rawdata.regs));
    GetLocalRegs(regs);
    stackPtr = reinterpret_cast<uintptr_t>(regs[RegisterGetSP(buildArchType)]);
    uintptr_t stackBottom = 0;
    uintptr_t stackTop = 0;
    int stackSize = 0;
    if (!GetRuntimeStackRange(stackPtr, stackBottom, stackTop, g_hookPid.load() == GetCurThreadId())) {
        return stackSize;
    }
    stackSize = static_cast<int>(stackTop - stackPtr);
    return stackSize;
}
471
// malloc() hook: performs the real allocation via `fn`, then — subject to the
// size filters and the sampler — reports a MALLOC_MSG record carrying either
// an fp-unwound stack (arm64) or raw registers plus stack bytes for offline
// unwinding.
void* hook_malloc(void* (*fn)(size_t), size_t size)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(size);
    }
    // Cheapest filters first: common-size filter, global disable / fork
    // detection, then the user-configured size window.
    if (SimplifiedFilter(ret, size)) {
        return ret;
    }
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return ret;
    }
    if (!ohos_set_filter_size(size, ret)) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) { //0 not sampling
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }

    // Take a strong reference so the client cannot be destroyed mid-send.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        stackSize = GetStackSize(stackPtr, rawdata);
    }
    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = ret;
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        // Remember the address so the matching free() is reported too.
        Addr2Bitpool(ret);
    }
    // fp mode sends only the gathered ips; register mode sends the registers
    // and asks SendStackWithPayload to ship the raw stack bytes as payload.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    g_dataCounts += stackSize;
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
559
// aligned_alloc() hook: performs the real allocation via `fn`, then reports a
// MALLOC_MSG record exactly like hook_malloc() (the alignment itself is not
// reported, only the requested length).
void* hook_aligned_alloc(void* (*fn)(size_t, size_t), size_t align, size_t len)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(align, len);
    }
    // Same filter cascade as hook_malloc().
    if (SimplifiedFilter(ret, len)) {
        return ret;
    }
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return ret;
    }
    if (!ohos_set_filter_size(len, ret)) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(len) == 0) { //0 not sampling
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %"
                PRIu64"\n", g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(),
                g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }

    // Take a strong reference so the client cannot be destroyed mid-send.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        stackSize = GetStackSize(stackPtr, rawdata);
    }

    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = len;
    rawdata.addr = ret;
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        // Remember the address so the matching free() is reported too.
        Addr2Bitpool(ret);
    }
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    g_dataCounts += stackSize;
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %"
            PRIu64"\n", g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(),
            g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
650
hook_valloc(void * (* fn)(size_t),size_t size)651 void* hook_valloc(void* (*fn)(size_t), size_t size)
652 {
653 void* pRet = nullptr;
654 if (fn) {
655 pRet = fn(size);
656 }
657 return pRet;
658 }
659
// calloc() hook: performs the real allocation via `fn`, then reports a
// MALLOC_MSG record for number * size bytes, like hook_malloc().
// NOTE(review): number * size may overflow here even when the underlying
// calloc rejects it — the reported mallocSize would then be wrong; confirm.
void* hook_calloc(void* (*fn)(size_t, size_t), size_t number, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(number, size);
    }
    // Same filter cascade as hook_malloc().
    if (SimplifiedFilter(pRet, number * size)) {
        return pRet;
    }
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return pRet;
    }
    if (!ohos_set_filter_size(number * size, pRet)) {
        return pRet;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size * number) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return pRet;
    }
    // Take a strong reference so the client cannot be destroyed mid-send.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return pRet;
    }
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        stackSize = GetStackSize(stackPtr, rawdata);
    }

    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = number * size;
    rawdata.addr = pRet;
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        // Remember the address so the matching free() is reported too.
        Addr2Bitpool(pRet);
    }
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return pRet;
}
743
hook_memalign(void * (* fn)(size_t,size_t),size_t align,size_t bytes)744 void* hook_memalign(void* (*fn)(size_t, size_t), size_t align, size_t bytes)
745 {
746 void* pRet = nullptr;
747 if (fn) {
748 pRet = fn(align, bytes);
749 }
750 return pRet;
751 }
752
// realloc() hook: performs the real reallocation via `fn`, then reports the
// move as a FREE_MSG for the old pointer (no stack) followed by a MALLOC_MSG
// for the new pointer (with stack), both carrying the same timestamp.
void* hook_realloc(void* (*fn)(void*, size_t), void* ptr, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(ptr, size);
    }
    // Same filter cascade as hook_malloc().
    if (SimplifiedFilter(pRet, size)) {
        return pRet;
    }
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return pRet;
    }
    if (!ohos_set_filter_size(size, pRet)) {
        return pRet;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return pRet;
    }
    // Take a strong reference so the client cannot be destroyed mid-send.
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return pRet;
    }
    StackRawData rawdata = {{{{0}}}};
    StackRawData freeData = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
#endif
    } else {
        stackSize = GetStackSize(stackPtr, rawdata);
    }

    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = pRet;
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        // Remember the new address so the matching free() is reported too.
        Addr2Bitpool(pRet);
    }
    int realSize = 0;
    int freeRealSize = 0;
    // The free record reuses the malloc record's identity and timestamp but
    // points at the old address.
    freeData.type = FREE_MSG;
    freeData.pid = rawdata.pid;
    freeData.tid = rawdata.tid;
    freeData.mallocSize = 0;
    freeData.addr = ptr;
    freeData.ts = rawdata.ts;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        freeRealSize = sizeof(BaseStackRawData);
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        freeRealSize = realSize;
    }
    holder->SendStackWithPayload(&freeData, freeRealSize, nullptr, 0); // 0: Don't unwind the freeData
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return pRet;
}
846
// malloc_usable_size() hook: pure pass-through — the query is forwarded to
// `fn`; returns 0 when no underlying implementation is supplied.
size_t hook_malloc_usable_size(size_t (*fn)(void*), void* ptr)
{
    return fn ? fn(ptr) : 0;
}
856
hook_free(void (* free_func)(void *),void * p)857 void hook_free(void (*free_func)(void*), void* p)
858 {
859 if (g_ClientConfig.statisticsInterval > 0) {
860 if (!free_func) {
861 return;
862 }
863 if (g_ClientConfig.mallocDisable || IsPidChanged()) {
864 free_func(p);
865 return;
866 }
867 if (g_ClientConfig.sampleInterval >= THRESHOLD) {
868 if (!IsAddrExist(p)) {
869 free_func(p);
870 return;
871 }
872 }
873 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
874 auto holder = weakClient.lock();
875 if ((holder != nullptr) && p) {
876 holder->SendStackWithPayload(&p, sizeof(void*), nullptr, 0);
877 }
878 free_func(p);
879 #ifdef PERFORMANCE_DEBUG
880 g_mallocTimes++;
881 struct timespec end = {};
882 clock_gettime(CLOCK_REALTIME, &end);
883 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
884 if (g_mallocTimes % PRINT_INTERVAL == 0) {
885 PROFILER_LOG_ERROR(LOG_CORE,
886 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
887 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
888 }
889 #endif
890 return;
891 }
892 struct timespec freeTime = {};
893 clock_gettime(g_ClientConfig.clockId, &freeTime);
894 if (free_func) {
895 free_func(p);
896 }
897 if (g_ClientConfig.mallocDisable || IsPidChanged()) {
898 return;
899 }
900 if (g_ClientConfig.sampleInterval >= THRESHOLD) {
901 if (!IsAddrExist(p)) {
902 return;
903 }
904 }
905 #ifdef PERFORMANCE_DEBUG
906 struct timespec start = {};
907 clock_gettime(CLOCK_REALTIME, &start);
908 #endif
909 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
910 auto holder = weakClient.lock();
911 if (holder == nullptr) {
912 return;
913 }
914 StackRawData rawdata = {{{{0}}}};
915 uintptr_t stackPtr = 0;
916 int stackSize = 0;
917 int fpStackDepth = 0;
918 rawdata.ts = freeTime;
919 if (g_ClientConfig.freeStackData) {
920 if (g_ClientConfig.fpunwind) {
921 #ifdef __aarch64__
922 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
923 rawdata.jsChainId = getJsChainId();
924 #endif
925 } else {
926 stackSize = GetStackSize(stackPtr, rawdata);
927 }
928 }
929 rawdata.type = FREE_MSG;
930 rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
931 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
932 rawdata.mallocSize = 0;
933 rawdata.addr = p;
934 int realSize = 0;
935 if (g_ClientConfig.fpunwind) {
936 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
937 } else {
938 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
939 }
940 holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
941 #ifdef PERFORMANCE_DEBUG
942 g_mallocTimes++;
943 struct timespec end = {};
944 clock_gettime(CLOCK_REALTIME, &end);
945 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
946 if (g_mallocTimes % PRINT_INTERVAL == 0) {
947 PROFILER_LOG_ERROR(LOG_CORE,
948 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
949 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
950 }
951 #endif
952 }
953
// Reports a file-backed mmap to the daemon: the mapped file path, offset and
// the PROT_EXEC / MAP_FIXED bits it cares about. When a standard-library
// image (ld-musl/libc++) is re-mapped with MAP_FIXED in response-library
// mode, also refreshes the cached standard-library address ranges.
inline void SendMmapFileRawData(int prot, int flags, off_t offset, const std::string& filePath,
                                const StackRawData& rawdata, std::shared_ptr<HookSocketClient>& holder)
{
    NameData curRawdata = {{{{0}}}};
    curRawdata.addr = rawdata.addr;
    curRawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    curRawdata.mallocSize = rawdata.mallocSize;
    curRawdata.mmapArgs.offset = offset;
    curRawdata.type = OHOS::Developtools::NativeDaemon::MMAP_FILE_TYPE;
    if (prot & PROT_EXEC) {
        curRawdata.mmapArgs.flags |= PROT_EXEC;
    }
    // NOTE(review): length counts up to the first NUL, so an embedded-NUL
    // path would be truncated here — presumably intended.
    size_t len = strlen(filePath.c_str()) + 1;
    if ((flags & MAP_FIXED) && (g_ClientConfig.responseLibraryMode) && (IsLegalSoName(filePath)) &&
        (filePath.find("ld-musl") != std::string::npos || filePath.find("libc++") != std::string::npos)) {
        // A standard library moved: re-parse its address range for stack
        // filtering (see FilterStandardSoIp()).
        std::lock_guard<std::recursive_timed_mutex> guard(g_FilterMapMutex);
        ParseEvent(filePath, g_filterStaLibRange, curRawdata);
    }
    if (strncpy_s(curRawdata.name, MAX_HOOK_PATH + 1, filePath.c_str(), len) != EOK) {
        return;
    }
    if (flags & MAP_FIXED) {
        curRawdata.mmapArgs.flags |= MAP_FIXED;
    }
    holder->SendStackWithPayload(&curRawdata, sizeof(BaseStackRawData) + len, nullptr, 0);
}
980
// Interposed mmap. Calls the real mmap through 'fn' first, then reports the
// mapping (plus an unwound call stack) to the profiler daemon. Tracing
// failures never change the value returned to the caller.
void* hook_mmap(void*(*fn)(void*, size_t, int, int, int, off_t),
    void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(addr, length, prot, flags, fd, offset);
    }
    if (g_ClientConfig.largestSize > 0) {
        // largestSize mode only cares about file-backed mappings.
        // NOTE(review): this skips fd == 0 (a valid descriptor) while the
        // reporting branch below uses fd >= 0 — confirm the asymmetry is
        // intentional.
        if ((fd <= 0) || IsPidChanged()) {
            return ret;
        }
    } else {
        if (g_ClientConfig.mmapDisable || IsPidChanged()) {
            return ret;
        }
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    // Anonymous mappings may be sampled: StartSampling(length) == 0 means
    // this event was sampled out, so it is not reported.
    if ((fd < 0 && offset == 0) && g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL
        && g_sampler.StartSampling(length) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;  // IPC client already torn down
    }
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    // Capture the caller's stack: frame-pointer unwind on aarch64 (writes
    // rawdata.ip), raw register/stack copy otherwise.
    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        stackSize = GetStackSize(stackPtr, rawdata);
    }

    rawdata.type = MMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = ret;
    if (fd >= 0) {
        // File-backed mapping: resolve the backing file via /proc/self/fd,
        // report it, and tag the event with the file's basename.
        rawdata.type = MMAP_FILE_PAGE_MSG;
        char path[FD_PATH_LENGTH] = {0};
        char fileName[MAX_HOOK_PATH + 1] = {0};
        (void)snprintf_s(path, FD_PATH_LENGTH, FD_PATH_LENGTH - 1, "/proc/self/fd/%d", fd);
        ssize_t len = readlink(path, fileName, sizeof(fileName) - 1);
        if (len != -1) {
            fileName[len] = '\0';  // readlink does not NUL-terminate
            SendMmapFileRawData(prot, flags, offset, fileName, rawdata, holder);
            char* p = strrchr(fileName, '/');
            if (p != nullptr) {
                rawdata.tagId = GetTagId(holder, &fileName[p - fileName + 1]); // == p + 1
            } else {
                rawdata.tagId = GetTagId(holder, fileName);
            }
        }
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    // Send only the bytes the chosen unwind mode actually filled in.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
1082
// Interposed munmap. The timestamp is taken BEFORE the real munmap runs so
// the recorded time reflects when the range stopped being valid. The event
// is then reported to the daemon; the real return value is passed through.
int hook_munmap(int(*fn)(void*, size_t), void* addr, size_t length)
{
    int ret = -1;
    struct timespec unmapTime = {};
    clock_gettime(g_ClientConfig.clockId, &unmapTime);
    if (fn) {
        ret = fn(addr, length);
    }
    if (g_ClientConfig.mmapDisable || IsPidChanged()) {
        return ret;
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    int stackSize = 0;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;  // IPC client already torn down
    }
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int fpStackDepth = 0;
    rawdata.ts = unmapTime;
    // Unwinding for munmap is optional (munmapStackData); when disabled the
    // event is still sent, just without a call stack.
    if (g_ClientConfig.munmapStackData) {
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            stackSize = GetStackSize(stackPtr, rawdata);
        }
    }

    rawdata.type = MUNMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = addr;
    // Send only the bytes the chosen unwind mode actually filled in.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
1145
hook_prctl(int (* fn)(int,...),int option,unsigned long arg2,unsigned long arg3,unsigned long arg4,unsigned long arg5)1146 int hook_prctl(int(*fn)(int, ...),
1147 int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
1148 {
1149 int ret = -1;
1150 if (fn) {
1151 ret = fn(option, arg2, arg3, arg4, arg5);
1152 }
1153 if (reinterpret_cast<char*>(arg5) == nullptr || IsPidChanged() || g_ClientConfig.mmapDisable) {
1154 return ret;
1155 }
1156 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
1157 auto holder = weakClient.lock();
1158 if (holder == nullptr) {
1159 return ret;
1160 }
1161 if (option == PR_SET_VMA && arg2 == PR_SET_VMA_ANON_NAME) {
1162 #ifdef PERFORMANCE_DEBUG
1163 struct timespec start = {};
1164 clock_gettime(CLOCK_REALTIME, &start);
1165 #endif
1166 NameData rawdata = {{{{0}}}};
1167 clock_gettime(g_ClientConfig.clockId, &rawdata.ts);
1168 rawdata.type = PR_SET_VMA_MSG;
1169 rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
1170 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
1171 rawdata.mallocSize = arg4;
1172 rawdata.addr = reinterpret_cast<void*>(arg3);
1173 size_t tagLen = strlen(reinterpret_cast<char*>(arg5)) + 1;
1174 if (memcpy_s(rawdata.name, sizeof(rawdata.name), reinterpret_cast<char*>(arg5), tagLen) != EOK) {
1175 HILOG_BASE_ERROR(LOG_CORE, "memcpy_s tag failed");
1176 }
1177 rawdata.name[sizeof(rawdata.name) - 1] = '\0';
1178 holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData) + tagLen, nullptr, 0);
1179 #ifdef PERFORMANCE_DEBUG
1180 g_mallocTimes++;
1181 struct timespec end = {};
1182 clock_gettime(CLOCK_REALTIME, &end);
1183 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
1184 if (g_mallocTimes % PRINT_INTERVAL == 0) {
1185 PROFILER_LOG_ERROR(LOG_CORE,
1186 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
1187 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
1188 }
1189 #endif
1190 }
1191 return ret;
1192 }
1193
// Records an explicitly reported memory-trace event: isUsing == true marks
// [addr, addr + size) as in use under 'tag', false marks it released.
// A call stack is captured only for the "using" side; the tag is resolved
// to an id only for the "using" side as well.
void hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
{
    if (!g_ClientConfig.memtraceEnable || IsPidChanged()) {
        return;
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return;  // IPC client already torn down
    }
    int stackSize = 0;
    StackRawData rawdata = {{{{0}}}};
    uintptr_t stackPtr = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (isUsing) {
        // Frame-pointer unwind on aarch64 (fills rawdata.ip), raw
        // register/stack copy otherwise.
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip);
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            stackSize = GetStackSize(stackPtr, rawdata);
        }
    }
    rawdata.type = isUsing ? MEMORY_USING_MSG : MEMORY_UNUSING_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid.load());
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = addr;
    rawdata.tagId = isUsing ? GetTagId(holder, tag) : 0;
    // Send only the bytes the chosen unwind mode actually filled in.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, reinterpret_cast<void *>(stackPtr), stackSize);
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
}
1249
ohos_malloc_hook_initialize(const MallocDispatchType * malloc_dispatch,bool *,const char *)1250 bool ohos_malloc_hook_initialize(const MallocDispatchType*malloc_dispatch, bool*, const char*)
1251 {
1252 g_dispatch.store(malloc_dispatch);
1253 InititalizeIPC();
1254 return true;
1255 }
// musl hook exit point: tear down the IPC channel to the profiler daemon.
void ohos_malloc_hook_finalize(void)
{
    FinalizeIPC();
}
1260
ohos_malloc_hook_malloc(size_t size)1261 void* ohos_malloc_hook_malloc(size_t size)
1262 {
1263 __set_hook_flag(false);
1264 void* ret = hook_malloc(GetDispatch()->malloc, size);
1265 __set_hook_flag(true);
1266 return ret;
1267 }
1268
ohos_malloc_hook_realloc(void * ptr,size_t size)1269 void* ohos_malloc_hook_realloc(void* ptr, size_t size)
1270 {
1271 __set_hook_flag(false);
1272 void* ret = hook_realloc(GetDispatch()->realloc, ptr, size);
1273 __set_hook_flag(true);
1274 return ret;
1275 }
1276
ohos_malloc_hook_calloc(size_t number,size_t size)1277 void* ohos_malloc_hook_calloc(size_t number, size_t size)
1278 {
1279 __set_hook_flag(false);
1280 void* ret = hook_calloc(GetDispatch()->calloc, number, size);
1281 __set_hook_flag(true);
1282 return ret;
1283 }
1284
ohos_malloc_hook_valloc(size_t size)1285 void* ohos_malloc_hook_valloc(size_t size)
1286 {
1287 __set_hook_flag(false);
1288 void* ret = hook_valloc(GetDispatch()->valloc, size);
1289 __set_hook_flag(true);
1290 return ret;
1291 }
1292
ohos_malloc_hook_free(void * p)1293 void ohos_malloc_hook_free(void* p)
1294 {
1295 __set_hook_flag(false);
1296 hook_free(GetDispatch()->free, p);
1297 __set_hook_flag(true);
1298 }
1299
ohos_malloc_hook_malloc_usable_size(void * mem)1300 size_t ohos_malloc_hook_malloc_usable_size(void* mem)
1301 {
1302 __set_hook_flag(false);
1303 size_t ret = hook_malloc_usable_size(GetDispatch()->malloc_usable_size, mem);
1304 __set_hook_flag(true);
1305 return ret;
1306 }
1307
ohos_malloc_hook_get_hook_flag(void)1308 bool ohos_malloc_hook_get_hook_flag(void)
1309 {
1310 return pthread_getspecific(g_disableHookFlag) == nullptr;
1311 }
1312
ohos_malloc_hook_set_hook_flag(bool flag)1313 bool ohos_malloc_hook_set_hook_flag(bool flag)
1314 {
1315 bool oldFlag = ohos_malloc_hook_get_hook_flag();
1316 if (flag) {
1317 pthread_setspecific(g_disableHookFlag, nullptr);
1318 } else {
1319 pthread_setspecific(g_disableHookFlag, reinterpret_cast<void *>(1));
1320 }
1321 return oldFlag;
1322 }
1323
ohos_malloc_hook_mmap(void * addr,size_t length,int prot,int flags,int fd,off_t offset)1324 void* ohos_malloc_hook_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
1325 {
1326 __set_hook_flag(false);
1327 void* ret = hook_mmap(GetDispatch()->mmap, addr, length, prot, flags, fd, offset);
1328 __set_hook_flag(true);
1329 return ret;
1330 }
1331
ohos_malloc_hook_munmap(void * addr,size_t length)1332 int ohos_malloc_hook_munmap(void* addr, size_t length)
1333 {
1334 __set_hook_flag(false);
1335 int ret = hook_munmap(GetDispatch()->munmap, addr, length);
1336 __set_hook_flag(true);
1337 return ret;
1338 }
1339
ohos_malloc_hook_memtrace(void * addr,size_t size,const char * tag,bool isUsing)1340 void ohos_malloc_hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
1341 {
1342 __set_hook_flag(false);
1343 hook_memtrace(addr, size, tag, isUsing);
1344 __set_hook_flag(true);
1345 }
1346
ohos_malloc_hook_aligned_alloc(size_t align,size_t len)1347 void* ohos_malloc_hook_aligned_alloc(size_t align, size_t len)
1348 {
1349 __set_hook_flag(false);
1350 void* ret = hook_aligned_alloc(GetDispatch()->aligned_alloc, align, len);
1351 __set_hook_flag(true);
1352 return ret;
1353 }
1354
ohos_malloc_hook_prctl(int option,unsigned long arg2,unsigned long arg3,unsigned long arg4,unsigned long arg5)1355 int ohos_malloc_hook_prctl(int option, unsigned long arg2, unsigned long arg3,
1356 unsigned long arg4, unsigned long arg5)
1357 {
1358 __set_hook_flag(false);
1359 int ret = hook_prctl((GetDispatch()->prctl), option, arg2, arg3, arg4, arg5);
1360 __set_hook_flag(true);
1361 return ret;
1362 }
1363
ohos_set_filter_size(size_t size,void * ret)1364 bool ohos_set_filter_size(size_t size, void* ret)
1365 {
1366 if (g_ClientConfig.filterSize < 0 || size < static_cast<size_t>(g_ClientConfig.filterSize) || size > g_maxSize) {
1367 return false;
1368 }
1369 return true;
1370 }
1371
IsPidChanged(void)1372 static bool IsPidChanged(void)
1373 {
1374 if (g_isPidChanged) {
1375 return true;
1376 }
1377 int pid = getprocpid();
1378 // hap app after pid namespace used
1379 if (pid == PID_NAMESPACE_ID) {
1380 return false;
1381 } else {
1382 // native app & sa service
1383 g_isPidChanged = (g_hookPid.load() != 0 && g_hookPid.load() != pid);
1384 }
1385 return g_isPidChanged;
1386 }
1387
ohos_malloc_hook_send_hook_misc_data(uint64_t id,const char * stackPtr,size_t stackSize,uint32_t type)1388 bool ohos_malloc_hook_send_hook_misc_data(uint64_t id, const char* stackPtr, size_t stackSize, uint32_t type)
1389 {
1390 if (type == static_cast<uint32_t>(MISC_TYPE::JS_STACK_DATA)) {
1391 BaseStackRawData rawdata = {};
1392 rawdata.jsChainId = id;
1393 rawdata.type = JS_STACK_MSG;
1394 std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
1395 auto holder = weakClient.lock();
1396 if (holder != nullptr) {
1397 return holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData), stackPtr, stackSize);
1398 }
1399 }
1400 return false;
1401 }
1402
// Expose a pointer to the ArkTS hook configuration embedded in the client
// config so the loader can read it.
void* ohos_malloc_hook_get_hook_config()
{
    return &g_ClientConfig.arktsConfig;
}