/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include "cfi.h"
#include "ld_log.h"
#include "namespace.h"

/* This module supports LLVM cross-DSO CFI by implementing the __cfi_slowpath() and __cfi_slowpath_diag()
 * functions. These two functions are called before accessing another dso's resources; their job is to
 * locate the __cfi_check() of the target dso and call it. To do so, the CFI shadow and its shadow values
 * record the relationship between a dso and its __cfi_check addr while the dso is being loaded.
 * The CFI shadow is an array of shadow values, and each shadow value covers one LIBRARY_ALIGNMENT memory
 * range, so each dso maps to one or more shadow values depending on its address range.
 * There are 3 types of shadow values:
 * - invalid(0) : the target addr does not belong to any loaded dso.
 * - uncheck(1) : this LIBRARY_ALIGNMENT memory range belongs to a dso but does not need a CFI check.
 * - valid(2 - 0xFFFF) : this LIBRARY_ALIGNMENT memory range belongs to a dso and needs a CFI check.
 * A valid shadow value records the distance from the end of a LIBRARY_ALIGNMENT memory range to the __cfi_check addr
 * of the dso (the unit is 4096, because __cfi_check is aligned to 4096).
 * A valid shadow value is calculated as below:
 * sv = (AlignUp(__cfi_check, LIBRARY_ALIGNMENT) - __cfi_check + N * LIBRARY_ALIGNMENT) / 4096 + 2;
 *
 * N : starts at 0, the index of the LIBRARY_ALIGNMENT memory range within the dso.
 * + 2 : to avoid conflict with the invalid and uncheck shadow values.
 *
 * Below is an example of calculating the shadow values of a dso.
 *                                                  liba.so
 *                                                    /\
 *             /''''''''''''''''''''''''''''''''''''''  '''''''''''''''''''''''''''''''''''''''\
 *          0x40000 __cfi_check addr = 0x42000      0x80000             0xA0000             0xC0000
 *             +-------^-------------------------------^-------------------^-------------------+
 * Memory      |       |                               |                   |                   |
 *             +-------------------------------------------------------------------------------+
 *             \......... LIBRARY_ALIGNMENT .........../\.......... LIBRARY_ALIGNMENT........../
 *               \                                  /                                   /
 *                 \                             /                               /
 *                   \                        /                           /
 *                     \                   /                      /
 *                       \               /                 /
 *             +-------------------------------------------------------------------------------+
 * CFI shadow  |  invalid  |    sv1    |    sv2    |                  invalid                  |
 *             +-------------------------------------------------------------------------------+
 * sv1 = (0x80000 - 0x42000 + 0 * LIBRARY_ALIGNMENT) / 4096 + 2 = 64
 * sv2 = (0x80000 - 0x42000 + 1 * LIBRARY_ALIGNMENT) / 4096 + 2 = 128
 *
 * Calculating the __cfi_check address is the reverse process:
 * - First align the target addr up to LIBRARY_ALIGNMENT to locate the corresponding shadow value.
 * - Then calculate the __cfi_check addr from that shadow value.
 *
 * For the algorithm to work, the start addr of each dso should be aligned to LIBRARY_ALIGNMENT. */
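
/* A minimal worked example of the formula above (illustrative sketch only, not used by the loader).
 * It assumes LIBRARY_ALIGNMENT is 0x40000 (256 KiB, i.e. LIBRARY_ALIGNMENT_BITS == 18), matching the
 * diagram, and reproduces sv1 and sv2 of liba.so:
 *
 *   #include <stdint.h>
 *
 *   #define EXAMPLE_LIBRARY_ALIGNMENT 0x40000UL  // assumed value of LIBRARY_ALIGNMENT
 *
 *   static uint16_t example_shadow_value(uintptr_t cfi_check, uintptr_t n)
 *   {
 *       // AlignUp(__cfi_check, LIBRARY_ALIGNMENT) - __cfi_check + N * LIBRARY_ALIGNMENT, in 4096-byte
 *       // units, plus 2 to skip the invalid and uncheck values.
 *       uintptr_t aligned = (cfi_check + EXAMPLE_LIBRARY_ALIGNMENT - 1) & -EXAMPLE_LIBRARY_ALIGNMENT;
 *       return (uint16_t)((aligned - cfi_check + n * EXAMPLE_LIBRARY_ALIGNMENT) / 4096 + 2);
 *   }
 *
 *   // example_shadow_value(0x42000, 0) == 64  (sv1)
 *   // example_shadow_value(0x42000, 1) == 128 (sv2)
 */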

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define ALIGN_UP(a, b) (((a) + (b) - 1) & -(b))
#define ALIGN_DOWN(a, b) ((a) & -(b))
#if DL_FDPIC
#define LADDR(p, v) laddr((p), (v))
#else
#define LADDR(p, v) (void *)((p)->base + (v))
#endif

/* Function ptr for __cfi_check() */
typedef int (*cfi_check_t)(uint64_t, void *, void *);

static const uintptr_t shadow_granularity = LIBRARY_ALIGNMENT_BITS;
static const uintptr_t cfi_check_granularity = 12;
static const uintptr_t shadow_alignment = 1UL << shadow_granularity;
static uintptr_t shadow_size = 0;
/* Start addr of the CFI shadow */
static char *cfi_shadow_start = NULL;
/* List head of all the DSOs loaded by the process */
static struct dso *dso_list_head = NULL;
static struct dso *pldso = NULL;

/* Shadow value */
/* The related shadow value(s) are set to `sv_invalid` when:
 * - initializing the CFI shadow.
 * - removing a dso. */
static const uint16_t sv_invalid = 0;
/* The related shadow value(s) are set to `sv_uncheck` if:
 * - the DSO does not enable cross-DSO CFI.
 * - the DSO enables cross-DSO CFI but is larger than 16G; the shadow values for the part of the dso
 *   beyond 16G are set to `sv_uncheck`. */
static const uint16_t sv_uncheck = 1;
/* If a DSO enables cross-DSO CFI, its shadow values should be valid. Given the definitions of `sv_invalid`
 * and `sv_uncheck`, a valid shadow value is at least 2. */
static const uint16_t sv_valid_min = 2;

#if defined(__LP64__)
static const uintptr_t max_target_addr = 0xffffffffffff;
#else
static const uintptr_t max_target_addr = 0xffffffff;
#endif

/* Create a cfi shadow */
static int create_cfi_shadow(void);

/* Map dsos to CFI shadow */
static int add_dso_to_cfi_shadow(struct dso *dso);
static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type);

/* Find the __cfi_check() of target dso and call it */
void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr);
void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data);

static inline uintptr_t addr_to_offset(uintptr_t addr, int bits)
{
    /* Convert addr to CFI shadow offset.
     * Shift left 1 bit because the shadow value is uint16_t. */
    return (addr >> bits) << 1;
}
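
/* For example, assuming LIBRARY_ALIGNMENT_BITS == 18 (256 KiB ranges, as in the header diagram):
 * addr_to_offset(0x9F000, 18) == (0x9F000 >> 18) << 1 == 2 << 1 == 4, i.e. the shadow value for
 * address 0x9F000 lives at byte offset 4 (the third uint16_t slot) of the CFI shadow. */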

static struct symdef find_cfi_check_sym(struct dso *p)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    struct verinfo verinfo = { .s = "__cfi_check", .v = "", .use_vna_hash = false };
    struct sym_info_pair s_info_p = gnu_hash(verinfo.s);
    return find_sym_impl(p, &verinfo, s_info_p, 0, p->namespace);
}

static int is_addr_in_ldso(size_t a)
{
    size_t i = 0;
    if (DL_FDPIC) {
        i = count_syms(pldso);
        if (a - (size_t)pldso->funcdescs < i * sizeof(*pldso->funcdescs))
            return 1;
    }
    if (DL_FDPIC && pldso->loadmap) {
        for (i = 0; i < pldso->loadmap->nsegs; i++) {
            if (a - pldso->loadmap->segs[i].p_vaddr
                < pldso->loadmap->segs[i].p_memsz)
                return 1;
        }
    } else {
        Phdr *ph = pldso->phdr;
        size_t phcnt = pldso->phnum;
        size_t entsz = pldso->phentsize;
        size_t base = (size_t)pldso->base;
        for (; phcnt--; ph = (void *)((char *)ph + entsz)) {
            if (ph->p_type != PT_LOAD) continue;
            if (a - base - ph->p_vaddr < ph->p_memsz)
                return 1;
        }
        if (a - (size_t)pldso->map < pldso->map_len)
            return 0;
    }
    return 0;
}

static uintptr_t get_cfi_check_addr(uint16_t value, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    uintptr_t addr = (uintptr_t)func_ptr;
    uintptr_t aligned_addr = ALIGN_DOWN(addr, shadow_alignment) + shadow_alignment;
    uintptr_t cfi_check_func_addr = aligned_addr - ((uintptr_t)(value - sv_valid_min) << cfi_check_granularity);
#ifdef __arm__
    LD_LOGD("[CFI] [%{public}s] __arm__ defined!\n", __FUNCTION__);
    cfi_check_func_addr++;
#endif
    LD_LOGD("[CFI] [%{public}s] cfi_check_func_addr[%{public}p] in dso[%{public}s]\n",
        __FUNCTION__, cfi_check_func_addr, ((struct dso *)addr2dso((size_t)cfi_check_func_addr))->name);

    return cfi_check_func_addr;
}
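
/* Worked example, reversing the calculation in the file header (illustrative, assuming LIBRARY_ALIGNMENT
 * is 0x40000): for a target func_ptr of 0x9F000 the shadow value is sv2 == 128, so
 *   aligned_addr        = ALIGN_DOWN(0x9F000, 0x40000) + 0x40000         = 0xC0000
 *   cfi_check_func_addr = 0xC0000 - ((128 - 2) << cfi_check_granularity) = 0xC0000 - 0x7E000 = 0x42000
 * which is the __cfi_check address recorded for liba.so (the extra increment under __arm__ presumably
 * re-adds the Thumb entry-point bit that was stripped when the shadow value was computed). */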

static inline void cfi_slowpath_common(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    uint16_t value = sv_invalid;

    if (func_ptr == NULL) {
        return;
    }

#if defined(__aarch64__)
    LD_LOGD("[CFI] [%{public}s] __aarch64__ defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr & ((1ULL << 56) - 1);
#else
    LD_LOGD("[CFI] [%{public}s] __aarch64__ not defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr;
#endif

    /* Get shadow value */
    uintptr_t offset = addr_to_offset(addr, shadow_granularity);

    if (cfi_shadow_start == NULL) {
        LD_LOGE("[CFI] [%{public}s] the cfi_shadow_start is null!\n", __FUNCTION__);
        __builtin_trap();
    }

    if (offset > shadow_size) {
        value = sv_invalid;
    } else {
        value = *((uint16_t *)(cfi_shadow_start + offset));
    }
    LD_LOGD("[CFI] [%{public}s] called from %{public}s to %{public}s func_ptr:0x%{public}p shadow value:%{public}d diag_data:0x%{public}p call_site_type_id[%{public}p].\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr, value, diag_data, call_site_type_id);

    struct dso *dso = NULL;
    switch (value) {
    case sv_invalid:
        /* The ldso is an exception: it is loaded by the kernel and is not mapped to the CFI shadow.
         * Do not check it. */
        if (is_addr_in_ldso((size_t)func_ptr)) {
            LD_LOGI("[CFI] [%{public}s] uncheck for ldso\n", __FUNCTION__);
            return;
        }

        dso = (struct dso *)addr2dso((size_t)__builtin_return_address(0));
        if (dso == NULL) {
            LD_LOGE("[CFI] [%{public}s] can not find the dso!\n", __FUNCTION__);
            __builtin_trap();
        }
        LD_LOGD("[CFI] [%{public}s] dso name[%{public}s]!\n", __FUNCTION__, dso->name);

        struct symdef cfi_check_sym = find_cfi_check_sym(dso);
        if (!cfi_check_sym.sym) {
            LD_LOGE("[CFI] [%{public}s] can not find the __cfi_check in the dso!\n", __FUNCTION__);
            __builtin_trap();
        }
        LD_LOGD("[CFI] [%{public}s] cfi_check addr[%{public}p]!\n", __FUNCTION__,
            LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value));
        ((cfi_check_t)LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value))(call_site_type_id, func_ptr, diag_data);
        break;
    case sv_uncheck:
        break;
    default:
        ((cfi_check_t)get_cfi_check_addr(value, func_ptr))(call_site_type_id, func_ptr, diag_data);
        break;
    }

    return;
}

int init_cfi_shadow(struct dso *dso_list, struct dso *ldso)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    if (dso_list == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* Save the head node of dso list */
    dso_list_head = dso_list;
    pldso = ldso;

    return map_dso_to_cfi_shadow(dso_list);
}

int map_dso_to_cfi_shadow(struct dso *dso)
{
    bool has_cfi_check = false;

    if (dso == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* If the CFI shadow does not exist, create it and map all the dsos and their dependencies to it. */
    if (cfi_shadow_start == NULL) {
        /* Find the __cfi_check symbol in the dso list */
        for (struct dso *p = dso; p; p = p->next) {
            if (find_cfi_check_sym(p).sym) {
                LD_LOGD("[CFI] [%{public}s] find __cfi_check function in dso %{public}s!\n", __FUNCTION__, p->name);
                has_cfi_check = true;
                break;
            }
        }

        if (has_cfi_check) {
            if (create_cfi_shadow() == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] create cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }
            add_dso_to_cfi_shadow(dso_list_head);
            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
        }
    /* If the CFI shadow already exists, map the current dso and its dependencies to it. */
    } else {
        add_dso_to_cfi_shadow(dso);
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
    }

    return CFI_SUCCESS;
}

void unmap_dso_from_cfi_shadow(struct dso *dso)
{
    if (dso == NULL) {
        LD_LOGD("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return;
    }

    LD_LOGD("[CFI] [%{public}s] unmap dso %{public}s from shadow!\n", __FUNCTION__, dso->name);

    if (cfi_shadow_start == NULL)
        return;

    if (dso->map == 0 || dso->map_len == 0)
        return;

    if (dso->is_mapped_to_shadow == false)
        return;

    /* Set the dso's shadow values to invalid. */
    fill_shadow_value_to_shadow(dso->map, dso->map + dso->map_len, 0, sv_invalid);
    dso->is_mapped_to_shadow = false;
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");

    return;
}

static int create_cfi_shadow(void)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    /* The CFI shadow needs one shadow value per LIBRARY_ALIGNMENT range of the usable address space,
     * i.e. (max_target_addr >> shadow_granularity) entries. Shift left 1 bit because each shadow value
     * is a uint16_t. The size passed to mmap() must be page-aligned, so align shadow_size up. */
    shadow_size = ALIGN_UP(((max_target_addr >> shadow_granularity) << 1), PAGE_SIZE);

    uintptr_t *mmap_addr = mmap(NULL, shadow_size, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    if (mmap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    cfi_shadow_start = (char *)mmap_addr;
    LD_LOGD("[CFI] [%{public}s] the cfi_shadow_start addr is %{public}p!\n", __FUNCTION__, cfi_shadow_start);

    return CFI_SUCCESS;
}
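
/* Rough size check (illustrative, assuming LIBRARY_ALIGNMENT_BITS == 18 as in the header diagram): on LP64,
 * shadow_size = ALIGN_UP((0xffffffffffff >> 18) << 1, PAGE_SIZE) == 2 GiB of address space. Since the
 * mapping is PROT_READ, anonymous and MAP_NORESERVE, it costs almost no physical memory; pages are only
 * populated where fill_shadow_value_to_shadow() later mremap()s real shadow data over them. */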

static int add_dso_to_cfi_shadow(struct dso *dso)
{
    LD_LOGD("[CFI] [%{public}s] start with %{public}s !\n", __FUNCTION__, dso->name);
    for (struct dso *p = dso; p; p = p->next) {
        LD_LOGD("[CFI] [%{public}s] adding %{public}s to cfi shadow!\n", __FUNCTION__, p->name);
        if (p->map == 0 || p->map_len == 0) {
            LD_LOGW("[CFI] [%{public}s] the dso has no data! map[%{public}p] map_len[0x%{public}x]\n",
                __FUNCTION__, p->map, p->map_len);
            continue;
        }

        if (p->is_mapped_to_shadow == true) {
            LD_LOGW("[CFI] [%{public}s] %{public}s is already in shadow!\n", __FUNCTION__, p->name);
            continue;
        }

        struct symdef cfi_check_sym = find_cfi_check_sym(p);
        /* If the dso doesn't have __cfi_check(), set its shadow values to uncheck. */
        if (!cfi_check_sym.sym) {
            LD_LOGD("[CFI] [%{public}s] %{public}s has no __cfi_check()!\n", __FUNCTION__, p->name);
            if (fill_shadow_value_to_shadow(p->map, p->map + p->map_len, 0, sv_uncheck) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add dso to cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }
        /* If the dso has __cfi_check(), set its shadow values to valid. */
        } else {
            LD_LOGD("[CFI] [%{public}s] %{public}s has __cfi_check()!\n", __FUNCTION__, p->name);
            uintptr_t end = p->map + p->map_len;
            uintptr_t cfi_check = (uintptr_t)LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value);

            if (cfi_check == 0) {
                LD_LOGE("[CFI] [%{public}s] %{public}s has null cfi_check func!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
            if (fill_shadow_value_to_shadow(p->map, end, cfi_check, sv_valid_min) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add %{public}s to cfi shadow failed!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
        }
        p->is_mapped_to_shadow = true;
        LD_LOGD("[CFI] [%{public}s] add %{public}s to cfi shadow succeed.\n", __FUNCTION__, p->name);
    }
    LD_LOGD("[CFI] [%{public}s] %{public}s done.\n", __FUNCTION__, dso->name);

    return CFI_SUCCESS;
}

static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type)
{
    LD_LOGD("[CFI] [%{public}s] begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
        __FUNCTION__, begin, end, cfi_check, type);

    /* To keep the CFI shadow update atomic, create a temp_shadow, write the shadow values to the
     * temp_shadow, and then move it over the CFI shadow with mremap(). */
    begin = ALIGN_DOWN(MAX(begin, cfi_check), shadow_alignment);
    char *shadow_begin = cfi_shadow_start + addr_to_offset(begin, LIBRARY_ALIGNMENT_BITS);
    char *shadow_end = (char *)(((uint16_t *)(cfi_shadow_start + addr_to_offset(end - 1, LIBRARY_ALIGNMENT_BITS))) + 1);
    char *aligned_shadow_begin = (char *)ALIGN_DOWN((uintptr_t)shadow_begin, PAGE_SIZE);
    char *aligned_shadow_end = (char *)ALIGN_UP((uintptr_t)shadow_end, PAGE_SIZE);

    uint16_t tmp_shadow_size = aligned_shadow_end - aligned_shadow_begin;
    uint16_t offset_begin = shadow_begin - aligned_shadow_begin;
    uint16_t offset_end = shadow_end - aligned_shadow_begin;

    char *tmp_shadow_start = (char *)mmap(NULL, tmp_shadow_size,
        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (tmp_shadow_start == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] tmp_shadow_start is %{public}p\t tmp_shadow_size is 0x%{public}x!\n",
        __FUNCTION__, tmp_shadow_start, tmp_shadow_size);
    memcpy(tmp_shadow_start, aligned_shadow_begin, offset_begin);
    memcpy(tmp_shadow_start + offset_end, shadow_end, aligned_shadow_end - shadow_end);

    /* If the dso has __cfi_check(), calculate valid shadow values. */
    if (type == sv_valid_min) {
#ifdef __arm__
        uint16_t shadow_value_begin = ((begin + shadow_alignment - (cfi_check - 1))
            >> cfi_check_granularity) + sv_valid_min;
#else
        uint16_t shadow_value_begin = ((begin + shadow_alignment - cfi_check)
            >> cfi_check_granularity) + sv_valid_min;
#endif
        LD_LOGD("[CFI] [%{public}s] shadow_value_begin is 0x%{public}x!\n", __FUNCTION__, shadow_value_begin);
        uint16_t shadow_value_step = 1 << (shadow_granularity - cfi_check_granularity);
        uint16_t shadow_value = shadow_value_begin;

        /* Set shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            /* If a dso is larger than 16G (max_shadow_value * shadow_alignment), the shadow value wraps
             * around; the excess is left unchecked. */
            if (shadow_value < shadow_value_begin) {
                *shadow_addr = sv_uncheck;
                continue;
            }
            *shadow_addr = (*shadow_addr == sv_invalid) ? shadow_value : sv_uncheck;
            shadow_value += shadow_value_step;
        }
    /* For sv_uncheck and sv_invalid, every shadow value in the range is simply set to the given type. */
    } else if (type == sv_uncheck || type == sv_invalid) {
        /* Set shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            *shadow_addr = type;
        }
    } else {
        LD_LOGE("[CFI] [%{public}s] has error param!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    mprotect(tmp_shadow_start, tmp_shadow_size, PROT_READ);
    /* Remap temp_shadow to CFI shadow. */
    uint16_t *mremap_addr = mremap(tmp_shadow_start, tmp_shadow_size, tmp_shadow_size,
        MREMAP_MAYMOVE | MREMAP_FIXED, aligned_shadow_begin);

    if (mremap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mremap failed!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] fill completed!\n", __FUNCTION__);
    return CFI_SUCCESS;
}
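
/* The temp-shadow plus mremap() trick above is a general pattern for atomically replacing the contents of
 * a live read-only mapping. A minimal standalone sketch of the same idea (hypothetical helper, not part of
 * the loader):
 *
 *   #define _GNU_SOURCE
 *   #include <string.h>
 *   #include <sys/mman.h>
 *
 *   // Replace `size` bytes (page-aligned) at `dst` with `src`. Concurrent readers of `dst` observe either
 *   // the old or the new contents, never a partially written mixture.
 *   static int replace_pages_atomically(void *dst, const void *src, size_t size)
 *   {
 *       void *tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *       if (tmp == MAP_FAILED)
 *           return -1;
 *       memcpy(tmp, src, size);              // stage the new contents privately
 *       mprotect(tmp, size, PROT_READ);      // match the protection of the live mapping
 *       // MREMAP_FIXED moves the staged pages over `dst` in a single step, discarding the old pages.
 *       if (mremap(tmp, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, dst) == MAP_FAILED) {
 *           munmap(tmp, size);
 *           return -1;
 *       }
 *       return 0;
 *   }
 */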

void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, NULL);
    return;
}

void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, diag_data);
    return;
}
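
/* For reference, a sketch of how these entry points get exercised. With Clang's cross-DSO CFI
 * (-fsanitize=cfi -fsanitize-cfi-cross-dso) the compiler instruments indirect calls whose target may live
 * in another DSO so that, conceptually, the call site first asks the loader to route the check to the
 * target DSO. Hypothetical hand-written illustration (real call sites are emitted by the compiler, and
 * CALL_SITE_TYPE_ID stands for the compiler-generated type id):
 *
 *   typedef void (*callback_t)(int);
 *
 *   void invoke(callback_t cb, int arg)
 *   {
 *       // Emitted before the indirect call: __cfi_slowpath() looks up the target dso in the CFI shadow
 *       // and forwards the check to that dso's __cfi_check(), which traps on a type mismatch.
 *       __cfi_slowpath(CALL_SITE_TYPE_ID, (void *)cb);
 *       cb(arg);
 *   }
 */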