/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <atomic>
#include <climits>
#include <cstdint>
#include <ctime>
#include <dlfcn.h>
#include <fcntl.h>
#include <memory>
#include <mutex>
#include <new>
#include <string>
#include <sys/time.h>
#include <unistd.h>

#include "hook_socket_client.h"
#include "musl_preinit_common.h"
#include "stack_writer.h"
#include "runtime_stack_range.h"
#include "register.h"
#include "virtual_runtime.h"
#include "get_thread_id.h"
#include "hook_client.h"

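// Per-thread hook-enable flag, read and written through ohos_malloc_hook_get_hook_flag() /
// ohos_malloc_hook_set_hook_flag() below.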
static __thread bool ohos_malloc_hook_enable_hook_flag = true;

namespace {
std::shared_ptr<HookSocketClient> g_hookClient;
std::recursive_mutex g_ClientMutex;
std::atomic<const MallocDispatchType*> g_dispatch {nullptr};

const MallocDispatchType* GetDispatch()
{
    return g_dispatch.load(std::memory_order_relaxed);
}

bool InitializeIPC()
{
    return true;
}

void FinalizeIPC() {}
} // namespace

bool ohos_malloc_hook_on_start(void)
{
    std::lock_guard<std::recursive_mutex> guard(g_ClientMutex);

    if (g_hookClient == nullptr) {
        g_hookClient = std::make_shared<HookSocketClient>(getpid());
    }
    return true;
}

bool ohos_malloc_hook_on_end(void)
{
    std::lock_guard<std::recursive_mutex> guard(g_ClientMutex);
    g_hookClient = nullptr;

    return true;
}

void* hook_malloc(void* (*fn)(size_t), size_t size)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(size);
    }

    if (size < g_filterSize) {
        return ret;
    }

    int regCount = OHOS::Developtools::NativeDaemon::RegisterGetCount();
    if (regCount <= 0) {
        return ret;
    }
    uint32_t* regs = new (std::nothrow) uint32_t[regCount];
    if (!regs) {
        HILOG_ERROR(LOG_CORE, "new regs failed");
        return ret;
    }

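    // On ARM, snapshot the current stack pointer (r13) and program counter (r15) into the register
    // buffer so the daemon can locate and unwind this thread's stack.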
#if defined(__arm__)
    asm volatile(
        "mov r3, r13\n"
        "mov r4, r15\n"
        "stmia %[base], {r3-r4}\n"
        : [ base ] "+r"(regs)
        :
        : "r3", "r4", "memory");
#endif
    const char* stackptr = reinterpret_cast<const char*>(regs[OHOS::Developtools::NativeDaemon::RegisterGetSP()]);
    char* stackendptr = nullptr;
    GetRuntimeStackEnd(stackptr, &stackendptr); // stack end pointer
    int stackSize = stackendptr - stackptr;
    pid_t pid = getpid();
    pid_t tid = get_thread_id();

    struct timespec ts = {};
    clock_gettime(CLOCK_REALTIME, &ts);

    uint32_t type = 0;

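    // Serialize one allocation record for the daemon:
    // timestamp | type (0 = malloc) | requested size | returned pointer | stack size |
    // raw stack copy | pid | tid | register snapshot.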
    size_t metaSize = sizeof(ts) + sizeof(type) + sizeof(size) + sizeof(void *)
        + sizeof(stackSize) + stackSize + sizeof(pid_t) + sizeof(pid_t) + regCount * sizeof(uint32_t);
    std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(metaSize);
    size_t totalSize = metaSize;

    if (memcpy_s(buffer.get(), totalSize, &ts, sizeof(ts)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s ts failed");
    }
    metaSize = sizeof(ts);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &type, sizeof(type)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s type failed");
    }
    metaSize += sizeof(type);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &size, sizeof(size)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s size failed");
    }
    metaSize += sizeof(size);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &ret, sizeof(void *)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s ret failed");
    }
    metaSize += sizeof(void *);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &stackSize, sizeof(stackSize)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s stackSize failed");
    }
    metaSize += sizeof(stackSize);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, stackptr, stackSize) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s stackptr failed");
    }
    metaSize += stackSize;
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &pid, sizeof(pid)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s pid failed");
    }
    metaSize += sizeof(pid_t);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &tid, sizeof(tid)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s tid failed");
    }
    metaSize += sizeof(pid_t);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, regs, regCount * sizeof(uint32_t)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s regs failed");
    }
    metaSize += regCount * sizeof(uint32_t);
    delete[] regs;

    std::lock_guard<std::recursive_mutex> guard(g_ClientMutex);
    if (g_hookClient != nullptr) {
        g_hookClient->SendStack(buffer.get(), metaSize);
    }
    return ret;
}

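// The remaining allocation hooks only forward to the original allocator; at present only malloc and
// free events are serialized and sent to the daemon.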
void* hook_valloc(void* (*fn)(size_t), size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(size);
    }
    return pRet;
}

void* hook_calloc(void* (*fn)(size_t, size_t), size_t number, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(number, size);
    }
    return pRet;
}

void* hook_memalign(void* (*fn)(size_t, size_t), size_t align, size_t bytes)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(align, bytes);
    }
    return pRet;
}

void* hook_realloc(void* (*fn)(void *, size_t), void* ptr, size_t size)
{
    void* pRet = nullptr;
    if (fn) {
        pRet = fn(ptr, size);
    }

    return pRet;
}

size_t hook_malloc_usable_size(size_t (*fn)(void*), void* ptr)
{
    size_t ret = 0;
    if (fn) {
        ret = fn(ptr);
    }

    return ret;
}

void hook_free(void (*free_func)(void*), void *p)
{
    if (free_func) {
        free_func(p);
    }

    int regCount = OHOS::Developtools::NativeDaemon::RegisterGetCount();
    if (regCount <= 0) {
        return;
    }
    uint32_t* regs = new (std::nothrow) uint32_t[regCount];
    if (!regs) {
        HILOG_ERROR(LOG_CORE, "new regs failed");
        return;
    }
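    // As in hook_malloc: on ARM, capture the current SP (r13) and PC (r15) so the daemon can unwind
    // the stack for this free event.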
#if defined(__arm__)
    asm volatile(
        "mov r3, r13\n"
        "mov r4, r15\n"
        "stmia %[base], {r3-r4}\n"
        : [ base ] "+r"(regs)
        :
        : "r3", "r4", "memory");
#endif
    const char* stackptr = reinterpret_cast<const char*>(regs[OHOS::Developtools::NativeDaemon::RegisterGetSP()]);
    char* stackendptr = nullptr;
    GetRuntimeStackEnd(stackptr, &stackendptr); // stack end pointer
    int stackSize = stackendptr - stackptr;
    pid_t tid = get_thread_id();
    pid_t pid = getpid();
    uint32_t type = 1;
    struct timespec ts = {};
    clock_gettime(CLOCK_REALTIME, &ts);

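    // Serialize one free record: timestamp | type (1 = free) | zeroed size field | freed pointer |
    // stack size | raw stack copy | pid | tid | register snapshot.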
    size_t metaSize = sizeof(ts) + sizeof(type) + sizeof(uint32_t) + sizeof(void *)
        + sizeof(stackSize) + stackSize + sizeof(pid_t) + sizeof(pid_t) + regCount * sizeof(uint32_t);
    std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(metaSize);
    size_t totalSize = metaSize;

    if (memcpy_s(buffer.get(), totalSize, &ts, sizeof(ts)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s ts failed");
    }
    metaSize = sizeof(ts);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &type, sizeof(type)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s type failed");
    }
    metaSize += sizeof(type);
    if (memset_s(buffer.get() + metaSize, totalSize - metaSize, 0, sizeof(uint32_t)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memset_s data failed");
    }
    metaSize += sizeof(uint32_t);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &p, sizeof(void *)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s ptr failed");
    }
    metaSize += sizeof(void *);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &stackSize, sizeof(stackSize)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s stackSize failed");
    }
    metaSize += sizeof(stackSize);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, stackptr, stackSize) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s stackptr failed");
    }
    metaSize += stackSize;
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &pid, sizeof(pid)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s pid failed");
    }
    metaSize += sizeof(pid_t);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, &tid, sizeof(tid)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s tid failed");
    }
    metaSize += sizeof(pid_t);
    if (memcpy_s(buffer.get() + metaSize, totalSize - metaSize, regs, regCount * sizeof(uint32_t)) != EOK) {
        HILOG_ERROR(LOG_CORE, "memcpy_s regs failed");
    }
    metaSize += regCount * sizeof(uint32_t);

    delete[] regs;
    std::lock_guard<std::recursive_mutex> guard(g_ClientMutex);
    if (g_hookClient != nullptr) {
        g_hookClient->SendStack(buffer.get(), metaSize);
    }
}

bool ohos_malloc_hook_initialize(const MallocDispatchType* malloc_dispatch, bool*, const char*)
{
    g_dispatch.store(malloc_dispatch);
    InitializeIPC();
    return true;
}

void ohos_malloc_hook_finalize(void)
{
    FinalizeIPC();
}

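// Exported wrappers installed into the malloc dispatch table. Each one clears the per-thread hook
// flag around the real call, presumably so that allocations performed inside the hook path itself
// (record buffers, IPC) are not traced recursively.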
void* ohos_malloc_hook_malloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_malloc(GetDispatch()->malloc, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_realloc(void* ptr, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_realloc(GetDispatch()->realloc, ptr, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_calloc(size_t number, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_calloc(GetDispatch()->calloc, number, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_valloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_valloc(GetDispatch()->valloc, size);
    __set_hook_flag(true);
    return ret;
}

void ohos_malloc_hook_free(void* p)
{
    __set_hook_flag(false);
    hook_free(GetDispatch()->free, p);
    __set_hook_flag(true);
}

void* ohos_malloc_hook_memalign(size_t alignment, size_t bytes)
{
    __set_hook_flag(false);
    void* ret = hook_memalign(GetDispatch()->memalign, alignment, bytes);
    __set_hook_flag(true);
    return ret;
}

size_t ohos_malloc_hook_malloc_usable_size(void* mem)
{
    __set_hook_flag(false);
    size_t ret = hook_malloc_usable_size(GetDispatch()->malloc_usable_size, mem);
    __set_hook_flag(true);
    return ret;
}

bool ohos_malloc_hook_get_hook_flag(void)
{
    return ohos_malloc_hook_enable_hook_flag;
}

bool ohos_malloc_hook_set_hook_flag(bool flag)
{
    bool before_flag = ohos_malloc_hook_enable_hook_flag;
    ohos_malloc_hook_enable_hook_flag = flag;
    return before_flag;
}