1 /*
2 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <atomic>
17 #include <climits>
18 #include <dlfcn.h>
19 #include <fcntl.h>
20 #include <string>
21 #include <sys/time.h>
22 #include <pthread.h>
23 #include <sys/prctl.h>
24 #include <unordered_set>
25 #include "common.h"
26 #include "hook_common.h"
27 #include "hook_socket_client.h"
28 #include "musl_preinit_common.h"
29 #include "parameter.h"
30 #include "stack_writer.h"
31 #include "runtime_stack_range.h"
32 #include "register.h"
33 #include "virtual_runtime.h"
34 #include "get_thread_id.h"
35 #include "hook_client.h"
36
37 static pthread_key_t g_disableHookFlag;
38 static pthread_key_t g_hookTid;
39 namespace {
40 static std::atomic<uint64_t> g_timeCost = 0;
41 static std::atomic<uint64_t> g_mallocTimes = 0;
42 static std::atomic<uint64_t> g_dataCounts = 0;
43 using OHOS::Developtools::NativeDaemon::buildArchType;
44 static std::shared_ptr<HookSocketClient> g_hookClient;
45 std::recursive_timed_mutex g_ClientMutex;
46 std::atomic<const MallocDispatchType*> g_dispatch {nullptr};
47 constexpr int TIMEOUT_MSEC = 2000;
48 constexpr int PRINT_INTERVAL = 5000;
49 constexpr uint64_t S_TO_NS = 1000 * 1000 * 1000;
50 static pid_t g_hookPid = 0;
51 static ClientConfig g_ClientConfig = {0};
52 static uint32_t g_minSize = 0;
53 static uint32_t g_maxSize = INT_MAX;
54 static std::unordered_set<void*> g_mallocIgnoreSet;
55 constexpr int PID_STR_SIZE = 4;
56 constexpr int STATUS_LINE_SIZE = 512;
57 constexpr int PID_NAMESPACE_ID = 1; // 1: pid is 1 after pid namespace used
58 static bool g_isPidChanged = false;
GetDispatch()59 const MallocDispatchType* GetDispatch()
60 {
61 return g_dispatch.load(std::memory_order_relaxed);
62 }
63
// Placeholder for IPC setup; the socket client is created lazily in
// ohos_malloc_hook_on_start(), so nothing is required here.
// (Name keeps the original "Inititalize" spelling — it is part of the
// external interface.)
bool InititalizeIPC()
{
    return true;
}
FinalizeIPC()68 void FinalizeIPC() {}
69
// Extracts the first run of decimal digits from buf (e.g. the value part of
// a "Pid:\t1234" line read from /proc/<pid>/status) and returns it as an
// int. At most 10 digits are consumed; scanning stops at the first
// non-digit that follows the digit run. Returns 0 if buf holds no digits.
int ConvertPid(char* buf)
{
    char digits[11] = {0}; /* 11: 32 bits to the maximum length of a string */
    unsigned long len = 0;
    for (const char* cur = buf; *cur != '\0'; ++cur) {
        const bool isDigit = (*cur >= '0') && (*cur <= '9');
        if (isDigit && (len < sizeof(digits) - 1)) {
            digits[len++] = *cur;
        } else if (len > 0) {
            // The digit run ended (or the buffer is full) — stop scanning.
            break;
        }
    }
    return atoi(digits);
}
90
GetRealPid(void)91 pid_t GetRealPid(void)
92 {
93 const char *path = "/proc/self/status";
94 char buf[STATUS_LINE_SIZE] = {0};
95 FILE *fp = fopen(path, "r");
96 if (fp == nullptr) {
97 return -1;
98 }
99 while (!feof(fp)) {
100 if (fgets(buf, STATUS_LINE_SIZE, fp) == nullptr) {
101 fclose(fp);
102 return -1;
103 }
104 if (strncmp(buf, "Pid:", PID_STR_SIZE) == 0) {
105 break;
106 }
107 }
108 (void)fclose(fp);
109 return static_cast<pid_t>(ConvertPid(buf));
110 }
111 } // namespace
112
// Returns the caller's thread id, caching it in the g_hookTid thread-local
// slot so the lookup in get_thread_id() runs at most once per thread.
// NOTE(review): a tid of 0 would be indistinguishable from "not cached yet"
// and re-queried on every call — harmless as long as get_thread_id() never
// returns 0 for user threads; confirm against its implementation.
pid_t inline __attribute__((always_inline)) GetCurThreadId()
{
    if (pthread_getspecific(g_hookTid) == nullptr) {
        pthread_setspecific(g_hookTid, reinterpret_cast<void *>(get_thread_id()));
    }
    return reinterpret_cast<long>((pthread_getspecific(g_hookTid)));
}
120
121 static bool IsPidChanged(void);
122
// Starts the hook: connects to the profiler daemon, creates the per-thread
// keys used for re-entrancy protection and tid caching, and loads the
// allocation-size filter window. Idempotent — a second call while the
// client is alive only logs and returns true.
bool ohos_malloc_hook_on_start(void)
{
    std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
    COMMON::PrintMallinfoLog("before hook(byte) => ");
    g_hookPid = GetRealPid();
    g_mallocTimes = 0;
    if (g_hookClient != nullptr) {
        HILOG_INFO(LOG_CORE, "hook already started");
        return true;
    } else {
        // Creating the client establishes the connection to the daemon and
        // fills g_ClientConfig with the session settings.
        g_hookClient = std::make_shared<HookSocketClient>(g_hookPid, &g_ClientConfig);
    }
    // Keys are deleted again in ohos_release_on_end(); return values are
    // deliberately ignored here (failure leaves the slots at their default).
    pthread_key_create(&g_disableHookFlag, nullptr);
    pthread_setspecific(g_disableHookFlag, nullptr);
    pthread_key_create(&g_hookTid, nullptr);
    pthread_setspecific(g_hookTid, nullptr);
    HILOG_INFO(LOG_CORE, "ohos_malloc_hook_on_start");
    GetMainThreadRuntimeStackRange();
    // Default filter lower bound comes from the daemon config; the system
    // parameter below may override both bounds as "min,max".
    g_minSize = g_ClientConfig.filterSize_;
    constexpr int paramBufferLen = 128;
    char paramOutBuf[paramBufferLen] = {0};
    int ret = GetParameter("persist.hiviewdfx.profiler.mem.filter", "", paramOutBuf, paramBufferLen);
    if (ret > 0) {
        int min = 0;
        int max = 0;
        if (sscanf_s(paramOutBuf, "%d,%d", &min, &max) == 2) { // 2: two parameters.
            g_maxSize = max > 0 ? max : INT_MAX;
            g_minSize = min > 0 ? min : 0;
        }
        HILOG_INFO(LOG_CORE, "persist.hiviewdfx.profiler.mem.filter %s, min %d, max %d", paramOutBuf, g_minSize,
            g_maxSize);
    }
    return true;
}
157
// Worker-thread body spawned by ohos_malloc_hook_on_end(): tears down the
// hook state under the client mutex so in-flight hooks either finish before
// or observe a null client after.
void* ohos_release_on_end(void*)
{
    std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
    // Dropping the last shared_ptr destroys the client and its connection.
    g_hookClient = nullptr;
    pthread_key_delete(g_disableHookFlag);
    pthread_key_delete(g_hookTid);
    g_mallocIgnoreSet.clear();
    HILOG_INFO(LOG_CORE, "ohos_malloc_hook_on_end, mallocTimes :%" PRIu64, g_mallocTimes.load());
    COMMON::PrintMallinfoLog("after hook(byte) => ");
    return nullptr;
}
169
ohos_malloc_hook_on_end(void)170 bool ohos_malloc_hook_on_end(void)
171 {
172 if (g_hookClient != nullptr) {
173 g_hookClient->Flush();
174 }
175 pthread_t threadEnd;
176 if (pthread_create(&threadEnd, nullptr, ohos_release_on_end, nullptr)) {
177 HILOG_INFO(LOG_CORE, "create ohos_release_on_end fail");
178 return false;
179 }
180 pthread_detach(threadEnd);
181 return true;
182 }
183
184 #if defined(__aarch64__)
// Walks the frame-pointer chain starting at this function's own frame and
// stores up to maxDepth + 1 return addresses into ip[].
// Each frame is assumed to hold the AAPCS64 frame record {previous fp,
// return address}. The walk stops when the next frame pointer does not
// move strictly upwards, or would step past stackSize bytes from the
// starting frame — both treated as a corrupt or finished chain.
// NOTE(review): ip[] must have room for maxDepth + 1 entries; callers pass
// rawdata.ip sized by the daemon's maxStackDepth_ — confirm the bound.
static void inline __attribute__((always_inline)) FpUnwind(int maxDepth, uint64_t *ip, unsigned long stackSize)
{
    void **startfp = (void **)__builtin_frame_address(0);
    void **fp = startfp;
    for (int i = 0; i < maxDepth + 1; i++) {
        // fp + 1 addresses the saved return-address slot of the record.
        ip[i] = *(reinterpret_cast<unsigned long *>(fp + 1));
        void **nextFp = (void **)*fp;
        if (nextFp <= fp) {
            break;
        }
        if (((nextFp - startfp) * sizeof(void *)) > static_cast<unsigned long>(stackSize)) {
            break;
        }
        fp = nextFp;
    }
}
201 #endif
202
// Interposed malloc: performs the real allocation via fn, then records a
// MALLOC_MSG event (timestamp, pid/tid, size, address, call stack) and
// ships it to the profiler daemon through g_hookClient.
void* hook_malloc(void* (*fn)(size_t), size_t size)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(size);
    }
    // Skip tracking when malloc tracing is disabled or the process id
    // changed (fork) since the hook was installed.
    if (g_ClientConfig.mallocDisable_ || IsPidChanged()) {
        return ret;
    }
    // Outside the [g_minSize, g_maxSize] window: the address is remembered
    // in the ignore set so the matching free is suppressed too.
    if (!ohos_set_filter_size(size, ret)) {
        return ret;
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif
    StackRawData rawdata = {{{0}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    clock_gettime(CLOCK_REALTIME, &rawdata.ts);

    if (g_ClientConfig.fpunwind_) {
#ifdef __aarch64__
        // In-process frame-pointer unwind: only ip[] is transmitted, so the
        // raw-stack payload size is reset to 0 afterwards.
        stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
        stackSize = 0;
#endif
    } else {
        // Remote (daemon-side) unwind: capture registers here and send a
        // copy of the raw stack [stackptr, stackendptr).
        uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
#if defined(__arm__)
        // Capture SP (r13) and PC (r15) into the register buffer.
        asm volatile(
            "mov r3, r13\n"
            "mov r4, r15\n"
            "stmia %[base], {r3-r4}\n"
            : [ base ] "+r"(regs)
            :
            : "r3", "r4", "memory");
#elif defined(__aarch64__)
        // Byte offsets are register-index * 8: x28/x29 at 224, lr (x30) at
        // 240, then SP and PC (address of local label 1) at 248.
        asm volatile(
            "1:\n"
            "stp x28, x29, [%[base], #224]\n"
            "str x30, [%[base], #240]\n"
            "mov x12, sp\n"
            "adr x13, 1b\n"
            "stp x12, x13, [%[base], #248]\n"
            : [ base ] "+r"(regs)
            :
            : "x12", "x13", "memory");
#endif
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
    }
    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = ret;
    prctl(PR_GET_NAME, rawdata.tname);
    // Bounded wait on the client mutex: give up (dropping this record)
    // rather than stall the application's allocation path indefinitely.
    std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
    std::chrono::time_point<std::chrono::steady_clock> timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
    if (!lck.try_lock_until(timeout)) {
        HILOG_ERROR(LOG_CORE, "lock hook_malloc failed!");
        return ret;
    }

    if (g_hookClient != nullptr) {
        g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
    }
    g_mallocTimes++;
#ifdef PERFORMANCE_DEBUG
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    g_dataCounts += stackSize;
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        HILOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}
290
// Pass-through for valloc: the profiler does not record valloc
// allocations, so simply forward to the real implementation when present.
void* hook_valloc(void* (*fn)(size_t), size_t size)
{
    return fn ? fn(size) : nullptr;
}
299
// Interposed calloc: performs the real allocation, then records a
// MALLOC_MSG event with the requested byte count (number * size) and a
// call stack.
// NOTE(review): number * size may wrap for huge arguments; the real calloc
// rejects such requests, so pRet would be nullptr here — confirm the
// daemon tolerates a record with a null address.
void* hook_calloc(void* (*fn)(size_t, size_t), size_t number, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(number, size);
    }
    if (g_ClientConfig.mallocDisable_ || IsPidChanged()) {
        return pRet;
    }
    if (!ohos_set_filter_size(number * size, pRet)) {
        return pRet;
    }

    StackRawData rawdata = {{{0}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    clock_gettime(CLOCK_REALTIME, &rawdata.ts);

    if (g_ClientConfig.fpunwind_) {
#ifdef __aarch64__
        // In-process frame-pointer unwind: only ip[] is transmitted.
        stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
        stackSize = 0;
#endif
    } else {
        // Remote unwind: capture registers and send the raw stack span.
        uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
#if defined(__arm__)
        asm volatile(
            "mov r3, r13\n"
            "mov r4, r15\n"
            "stmia %[base], {r3-r4}\n"
            : [ base ] "+r"(regs)
            :
            : "r3", "r4", "memory");
#elif defined(__aarch64__)
        asm volatile(
            "1:\n"
            "stp x28, x29, [%[base], #224]\n"
            "str x30, [%[base], #240]\n"
            "mov x12, sp\n"
            "adr x13, 1b\n"
            "stp x12, x13, [%[base], #248]\n"
            : [ base ] "+r"(regs)
            :
            : "x12", "x13", "memory");
#endif
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
    }
    rawdata.type = MALLOC_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = number * size;
    rawdata.addr = pRet;
    prctl(PR_GET_NAME, rawdata.tname);
    // Bounded lock wait: drop the record rather than block the caller.
    std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
    std::chrono::time_point<std::chrono::steady_clock> timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
    if (!lck.try_lock_until(timeout)) {
        HILOG_ERROR(LOG_CORE, "lock hook_calloc failed!");
        return pRet;
    }

    if (g_hookClient != nullptr) {
        g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
    }
    g_mallocTimes++;
    return pRet;
}
373
// Pass-through for memalign: aligned allocations are not recorded by the
// profiler; forward to the real implementation when one is provided.
void* hook_memalign(void* (*fn)(size_t, size_t), size_t align, size_t bytes)
{
    return fn ? fn(align, bytes) : nullptr;
}
382
hook_realloc(void * (* fn)(void *,size_t),void * ptr,size_t size)383 void* hook_realloc(void* (*fn)(void*, size_t), void* ptr, size_t size)
384 {
385 void* pRet = nullptr;
386 if (fn) {
387 pRet = fn(ptr, size);
388 }
389 if (g_ClientConfig.mallocDisable_ || IsPidChanged()) {
390 return pRet;
391 }
392 if (!ohos_set_filter_size(size, pRet)) {
393 return pRet;
394 }
395
396 StackRawData rawdata = {{{0}}};
397 StackRawData freeData = {{{0}}};
398 const char* stackptr = nullptr;
399 const char* stackendptr = nullptr;
400 int stackSize = 0;
401 clock_gettime(CLOCK_REALTIME, &rawdata.ts);
402
403 if (g_ClientConfig.fpunwind_) {
404 #ifdef __aarch64__
405 stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
406 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
407 stackSize = stackendptr - stackptr;
408 FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
409 stackSize = 0;
410 if (g_ClientConfig.freeStackData_) {
411 (void)memcpy_s(freeData.ip, sizeof(freeData.ip) / sizeof(uint64_t),
412 rawdata.ip, sizeof(rawdata.ip) / sizeof(uint64_t));
413 }
414 #endif
415 } else {
416 uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
417 #if defined(__arm__)
418 asm volatile(
419 "mov r3, r13\n"
420 "mov r4, r15\n"
421 "stmia %[base], {r3-r4}\n"
422 : [ base ] "+r"(regs)
423 :
424 : "r3", "r4", "memory");
425 #elif defined(__aarch64__)
426 asm volatile(
427 "1:\n"
428 "stp x28, x29, [%[base], #224]\n"
429 "str x30, [%[base], #240]\n"
430 "mov x12, sp\n"
431 "adr x13, 1b\n"
432 "stp x12, x13, [%[base], #248]\n"
433 : [ base ] "+r"(regs)
434 :
435 : "x12", "x13", "memory");
436 #endif
437 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
438 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
439 stackSize = stackendptr - stackptr;
440 if (g_ClientConfig.freeStackData_) {
441 (void)memcpy_s(freeData.regs, sizeof(freeData.regs) / sizeof(char),
442 rawdata.regs, sizeof(rawdata.regs) / sizeof(char));
443 }
444 }
445 rawdata.type = MALLOC_MSG;
446 rawdata.pid = static_cast<uint32_t>(g_hookPid);
447 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
448 rawdata.mallocSize = size;
449 rawdata.addr = pRet;
450 prctl(PR_GET_NAME, rawdata.tname);
451 std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
452 std::chrono::time_point<std::chrono::steady_clock> timeout =
453 std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
454 if (!lck.try_lock_until(timeout)) {
455 HILOG_ERROR(LOG_CORE, "lock hook_realloc failed!");
456 return pRet;
457 }
458
459 if (g_hookClient != nullptr) {
460 freeData.type = FREE_MSG;
461 freeData.pid = rawdata.pid;
462 freeData.tid = rawdata.tid;
463 freeData.mallocSize = 0;
464 freeData.addr = ptr;
465 freeData.ts = rawdata.ts;
466 (void)memcpy_s(freeData.tname, sizeof(freeData.tname) / sizeof(char),
467 rawdata.tname, sizeof(rawdata.tname) / sizeof(char));
468 g_hookClient->SendStackWithPayload(&freeData, sizeof(freeData), nullptr, 0); // 0: Don't unwind the freeData
469 g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
470 }
471 g_mallocTimes++;
472 return pRet;
473 }
474
// Pass-through for malloc_usable_size; the result is not recorded by the
// profiler. Returns 0 when no real implementation is available.
size_t hook_malloc_usable_size(size_t (*fn)(void*), void* ptr)
{
    return fn ? fn(ptr) : 0;
}
484
// Interposed free: releases the memory via free_func, then reports a
// FREE_MSG for the address unless the matching allocation was filtered out
// (tracked in g_mallocIgnoreSet). A call stack is attached only when
// freeStackData_ is enabled in the client config.
void hook_free(void (*free_func)(void*), void* p)
{
    if (free_func) {
        free_func(p);
    }
    if (g_ClientConfig.mallocDisable_ || IsPidChanged()) {
        return;
    }
    {
        // If the matching malloc was suppressed by the size filter, drop
        // this free as well and forget the address.
        std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
        auto record = g_mallocIgnoreSet.find(p);
        if (record != g_mallocIgnoreSet.end()) {
            g_mallocIgnoreSet.erase(record);
            return;
        }
    }
    StackRawData rawdata = {{{0}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    clock_gettime(CLOCK_REALTIME, &rawdata.ts);

    if (g_ClientConfig.freeStackData_) {
        if (g_ClientConfig.fpunwind_) {
#ifdef __aarch64__
            // In-process frame-pointer unwind; only ip[] is transmitted.
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
            stackSize = 0;
#endif
        } else {
            // Remote unwind: capture registers and send the raw stack span.
            uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
#if defined(__arm__)
            asm volatile(
                "mov r3, r13\n"
                "mov r4, r15\n"
                "stmia %[base], {r3-r4}\n"
                : [ base ] "+r"(regs)
                :
                : "r3", "r4", "memory");
#elif defined(__aarch64__)
            asm volatile(
                "1:\n"
                "stp x28, x29, [%[base], #224]\n"
                "str x30, [%[base], #240]\n"
                "mov x12, sp\n"
                "adr x13, 1b\n"
                "stp x12, x13, [%[base], #248]\n"
                : [ base ] "+r"(regs)
                :
                : "x12", "x13", "memory");
#endif
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
    }

    rawdata.type = FREE_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = 0;
    rawdata.addr = p;
    prctl(PR_GET_NAME, rawdata.tname);

    // Bounded lock wait: drop the record rather than block the caller.
    std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
    std::chrono::time_point<std::chrono::steady_clock> timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
    if (!lck.try_lock_until(timeout)) {
        HILOG_ERROR(LOG_CORE, "lock hook_free failed!");
        return;
    }

    if (g_hookClient != nullptr) {
        g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
    }
}
563
// Interposed mmap: performs the real mapping, then records an MMAP_MSG
// event (length, mapped address, call stack) for the daemon.
void* hook_mmap(void*(*fn)(void*, size_t, int, int, int, off_t),
    void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(addr, length, prot, flags, fd, offset);
    }
    // Skip tracking when mmap tracing is disabled or the pid changed.
    if (g_ClientConfig.mmapDisable_ || IsPidChanged()) {
        return ret;
    }
    StackRawData rawdata = {{{0}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    clock_gettime(CLOCK_REALTIME, &rawdata.ts);

    if (g_ClientConfig.fpunwind_) {
#ifdef __aarch64__
        // In-process frame-pointer unwind; only ip[] is transmitted.
        stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
        stackSize = 0;
#endif
    } else {
        // Remote unwind: capture registers and send the raw stack span.
        uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
#if defined(__arm__)
        asm volatile(
            "mov r3, r13\n"
            "mov r4, r15\n"
            "stmia %[base], {r3-r4}\n"
            : [ base ] "+r"(regs)
            :
            : "r3", "r4", "memory");
#elif defined(__aarch64__)
        asm volatile(
            "1:\n"
            "stp x28, x29, [%[base], #224]\n"
            "str x30, [%[base], #240]\n"
            "mov x12, sp\n"
            "adr x13, 1b\n"
            "stp x12, x13, [%[base], #248]\n"
            : [ base ] "+r"(regs)
            :
            : "x12", "x13", "memory");
#endif
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
    }

    rawdata.type = MMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = ret;
    prctl(PR_GET_NAME, rawdata.tname);

    // Bounded lock wait: drop the record rather than block the caller.
    std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
    std::chrono::time_point<std::chrono::steady_clock> timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
    if (!lck.try_lock_until(timeout)) {
        HILOG_ERROR(LOG_CORE, "lock hook_mmap failed!");
        return ret;
    }
    if (g_hookClient != nullptr) {
        g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
    }
    return ret;
}
634
// Interposed munmap: performs the real unmapping, then records a
// MUNMAP_MSG event. A call stack is attached only when munmapStackData_
// is enabled in the client config.
int hook_munmap(int(*fn)(void*, size_t), void* addr, size_t length)
{
    int ret = -1;
    if (fn) {
        ret = fn(addr, length);
    }
    if (g_ClientConfig.mmapDisable_ || IsPidChanged()) {
        return ret;
    }
    int stackSize = 0;
    StackRawData rawdata = {{{0}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    clock_gettime(CLOCK_REALTIME, &rawdata.ts);

    if (g_ClientConfig.munmapStackData_) {
        if (g_ClientConfig.fpunwind_) {
#ifdef __aarch64__
            // In-process frame-pointer unwind; only ip[] is transmitted.
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            FpUnwind(g_ClientConfig.maxStackDepth_, rawdata.ip, stackSize);
            stackSize = 0;
#endif
        } else {
            // Remote unwind: capture registers and send the raw stack span.
            uint64_t* regs = reinterpret_cast<uint64_t*>(&(rawdata.regs));
#if defined(__arm__)
            asm volatile(
                "mov r3, r13\n"
                "mov r4, r15\n"
                "stmia %[base], {r3-r4}\n"
                : [ base ] "+r"(regs)
                :
                : "r3", "r4", "memory");
#elif defined(__aarch64__)
            asm volatile(
                "1:\n"
                "stp x28, x29, [%[base], #224]\n"
                "str x30, [%[base], #240]\n"
                "mov x12, sp\n"
                "adr x13, 1b\n"
                "stp x12, x13, [%[base], #248]\n"
                : [ base ] "+r"(regs)
                :
                : "x12", "x13", "memory");
#endif
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
    }

    rawdata.type = MUNMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = addr;
    prctl(PR_GET_NAME, rawdata.tname);

    // Bounded lock wait: drop the record rather than block the caller.
    std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
    std::chrono::time_point<std::chrono::steady_clock> timeout =
        std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
    if (!lck.try_lock_until(timeout)) {
        HILOG_ERROR(LOG_CORE, "lock hook_munmap failed!");
        return ret;
    }
    if (g_hookClient != nullptr) {
        g_hookClient->SendStackWithPayload(&rawdata, sizeof(rawdata), stackptr, stackSize);
    }
    return ret;
}
706
// Interposed prctl: forwards the call, and when a caller names an
// anonymous VMA (PR_SET_VMA / PR_SET_VMA_ANON_NAME) reports a
// PR_SET_VMA_MSG record carrying the region [arg3, arg3 + arg4) and the
// name string from arg5.
// NOTE(review): arg5 is assumed to point at a NUL-terminated string (the
// PR_SET_VMA_ANON_NAME contract); strlen on it is unchecked here.
int hook_prctl(int(*fn)(int, ...),
    int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
    int ret = -1;
    if (fn) {
        ret = fn(option, arg2, arg3, arg4, arg5);
    }
    if (IsPidChanged()) {
        return ret;
    }
    if (option == PR_SET_VMA && arg2 == PR_SET_VMA_ANON_NAME) {
        StackRawData rawdata = {{{0}}};
        clock_gettime(CLOCK_REALTIME, &rawdata.ts);
        rawdata.type = PR_SET_VMA_MSG;
        rawdata.pid = static_cast<uint32_t>(g_hookPid);
        rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
        rawdata.mallocSize = arg4;
        rawdata.addr = reinterpret_cast<void*>(arg3);
        // tagLen includes the terminating NUL; if it exceeds the tname
        // field, memcpy_s copies nothing and the failure is only logged.
        size_t tagLen = strlen(reinterpret_cast<char*>(arg5)) + 1;
        if (memcpy_s(rawdata.tname, sizeof(rawdata.tname), reinterpret_cast<char*>(arg5), tagLen) != EOK) {
            HILOG_ERROR(LOG_CORE, "memcpy_s tag failed");
        }
        rawdata.tname[sizeof(rawdata.tname) - 1] = '\0';
        // Bounded lock wait: drop the record rather than block the caller.
        std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
        std::chrono::time_point<std::chrono::steady_clock> timeout =
            std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
        if (!lck.try_lock_until(timeout)) {
            HILOG_ERROR(LOG_CORE, "lock failed!");
            return ret;
        }
        if (g_hookClient != nullptr) {
            g_hookClient->SendStack(&rawdata, sizeof(rawdata));
        }
    }
    return ret;
}
743
// Entry point invoked when the hook library is installed: publishes the
// dispatch table with the real allocator implementations so the hooks can
// forward to them.
bool ohos_malloc_hook_initialize(const MallocDispatchType*malloc_dispatch, bool*, const char*)
{
    g_dispatch.store(malloc_dispatch);
    InititalizeIPC();
    return true;
}
// Counterpart of ohos_malloc_hook_initialize(); FinalizeIPC() is a no-op.
void ohos_malloc_hook_finalize(void)
{
    FinalizeIPC();
}
754
// malloc entry installed in the dispatch table: the hook flag is cleared
// for the duration of the call so allocations made by the hook machinery
// itself are not tracked re-entrantly.
void* ohos_malloc_hook_malloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_malloc(GetDispatch()->malloc, size);
    __set_hook_flag(true);
    return ret;
}
762
// realloc entry installed in the dispatch table; see ohos_malloc_hook_malloc
// for the re-entrancy guard pattern.
void* ohos_malloc_hook_realloc(void* ptr, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_realloc(GetDispatch()->realloc, ptr, size);
    __set_hook_flag(true);
    return ret;
}
770
// calloc entry installed in the dispatch table; hook flag cleared to avoid
// re-entrant tracking.
void* ohos_malloc_hook_calloc(size_t number, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_calloc(GetDispatch()->calloc, number, size);
    __set_hook_flag(true);
    return ret;
}
778
// valloc entry installed in the dispatch table; hook_valloc is a plain
// pass-through, the guard is kept for uniformity.
void* ohos_malloc_hook_valloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_valloc(GetDispatch()->valloc, size);
    __set_hook_flag(true);
    return ret;
}
786
// free entry installed in the dispatch table; hook flag cleared to avoid
// re-entrant tracking.
void ohos_malloc_hook_free(void* p)
{
    __set_hook_flag(false);
    hook_free(GetDispatch()->free, p);
    __set_hook_flag(true);
}
793
// memalign entry installed in the dispatch table; hook_memalign is a plain
// pass-through, the guard is kept for uniformity.
void* ohos_malloc_hook_memalign(size_t alignment, size_t bytes)
{
    __set_hook_flag(false);
    void* ret = hook_memalign(GetDispatch()->memalign, alignment, bytes);
    __set_hook_flag(true);
    return ret;
}
801
// malloc_usable_size entry installed in the dispatch table; pass-through
// with the standard re-entrancy guard.
size_t ohos_malloc_hook_malloc_usable_size(void* mem)
{
    __set_hook_flag(false);
    size_t ret = hook_malloc_usable_size(GetDispatch()->malloc_usable_size, mem);
    __set_hook_flag(true);
    return ret;
}
809
// Returns whether hooking is currently enabled for the calling thread:
// a null value in the g_disableHookFlag slot means "enabled".
bool ohos_malloc_hook_get_hook_flag(void)
{
    return pthread_getspecific(g_disableHookFlag) == nullptr;
}
814
ohos_malloc_hook_set_hook_flag(bool flag)815 bool ohos_malloc_hook_set_hook_flag(bool flag)
816 {
817 bool oldFlag = ohos_malloc_hook_get_hook_flag();
818 if (flag) {
819 pthread_setspecific(g_disableHookFlag, nullptr);
820 } else {
821 pthread_setspecific(g_disableHookFlag, reinterpret_cast<void *>(1));
822 }
823 return oldFlag;
824 }
825
// mmap entry installed in the dispatch table; hook flag cleared to avoid
// re-entrant tracking.
void* ohos_malloc_hook_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    __set_hook_flag(false);
    void* ret = hook_mmap(GetDispatch()->mmap, addr, length, prot, flags, fd, offset);
    __set_hook_flag(true);
    return ret;
}
833
// munmap entry installed in the dispatch table; hook flag cleared to avoid
// re-entrant tracking.
int ohos_malloc_hook_munmap(void* addr, size_t length)
{
    __set_hook_flag(false);
    int ret = hook_munmap(GetDispatch()->munmap, addr, length);
    __set_hook_flag(true);
    return ret;
}
841
ohos_malloc_hook_memtag(void * addr,size_t size,char * tag,size_t tagLen)842 void ohos_malloc_hook_memtag(void* addr, size_t size, char* tag, size_t tagLen)
843 {
844 __set_hook_flag(false);
845 if (IsPidChanged()) {
846 return;
847 }
848 StackRawData rawdata = {{{0}}};
849 clock_gettime(CLOCK_REALTIME, &rawdata.ts);
850 rawdata.type = MEMORY_TAG;
851 rawdata.pid = getpid();
852 rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
853 rawdata.mallocSize = size;
854 rawdata.addr = addr;
855
856 if (memcpy_s(rawdata.tname, sizeof(rawdata.tname), tag, tagLen) != EOK) {
857 HILOG_ERROR(LOG_CORE, "memcpy_s tag failed");
858 }
859 rawdata.tname[sizeof(rawdata.tname) - 1] = '\0';
860
861 std::unique_lock<std::recursive_timed_mutex> lck(g_ClientMutex, std::defer_lock);
862 std::chrono::time_point<std::chrono::steady_clock> timeout =
863 std::chrono::steady_clock::now() + std::chrono::milliseconds(TIMEOUT_MSEC);
864 if (!lck.try_lock_until(timeout)) {
865 HILOG_ERROR(LOG_CORE, "lock failed!");
866 return;
867 }
868 if (g_hookClient != nullptr) {
869 g_hookClient->SendStack(&rawdata, sizeof(rawdata));
870 }
871 __set_hook_flag(true);
872 }
873
ohos_set_filter_size(size_t size,void * ret)874 bool ohos_set_filter_size(size_t size, void* ret)
875 {
876 if ((size < g_minSize) || (size > g_maxSize)) {
877 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex);
878 g_mallocIgnoreSet.insert(ret);
879 return false;
880 }
881 return true;
882 }
883
IsPidChanged(void)884 static bool IsPidChanged(void)
885 {
886 if (g_isPidChanged) {
887 return true;
888 }
889 int pid = getpid();
890 // hap app after pid namespace used
891 if (pid == PID_NAMESPACE_ID) {
892 return false;
893 } else {
894 // native app & sa service
895 g_isPidChanged = (g_hookPid != pid);
896 }
897 return g_isPidChanged;
898 }