/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include "cfi.h"
#include "ld_log.h"
#include "namespace.h"

/* This module supports LLVM CFI Cross-DSO by implementing __cfi_slowpath() and __cfi_slowpath_diag().
 * These two functions are called before accessing another DSO's resources; their job is to locate the
 * __cfi_check() of the target DSO and call it. To make that lookup fast, the loader records the
 * relationship between each DSO and its __cfi_check addr in the CFI shadow while the DSO is loaded.
 * The CFI shadow is an array of shadow values; each shadow value covers one LIBRARY_ALIGNMENT memory
 * range, so each DSO maps to one or more shadow values depending on its address range.
 * There are 3 types of shadow value:
 * - invalid (0) : the target addr does not belong to any loaded DSO.
 * - uncheck (1) : this LIBRARY_ALIGNMENT memory range belongs to a DSO, but no CFI check is needed.
 * - valid (2 - 0xFFFF) : this LIBRARY_ALIGNMENT memory range belongs to a DSO and the CFI check is required.
 * A valid shadow value records the distance from the end of a LIBRARY_ALIGNMENT memory range to the
 * __cfi_check addr of the DSO (the unit is 4096, because __cfi_check is aligned to 4096).
 * A valid shadow value is calculated as below:
 * sv = (AlignUp(__cfi_check, LIBRARY_ALIGNMENT) - __cfi_check + N * LIBRARY_ALIGNMENT) / 4096 + 2;
 *
 * N : the index (starting at 0) of the LIBRARY_ALIGNMENT memory range within the DSO.
 * + 2 : to avoid conflict with the invalid and uncheck shadow values.
 *
 * Below is an example of calculating the shadow values of a dso.
 *                                              liba.so
 *                                                /\
 * /''''''''''''''''''''''''''''''''''''        '''''''''''''''''''''''''''''''''''''\
 * 0x40000      __cfi_check addr = 0x42000      0x80000                  0xA0000                   0xC0000
 * +---------^----------------------------------------^-------------------------^-------------------------+
 * Memory    |                                         |                         |                         |
 * +------------------------------------------------------------------------------------------------------+
 * \........... LIBRARY_ALIGNMENT ..................../\........... LIBRARY_ALIGNMENT ..................../
 *  \                                                 /                                                  /
 *   \                                            /                                               /
 *    \                                       /                                            /
 *     \                                  /                                         /
 *      \                             /                                      /
 * +-----------------------------------------------------------------------------------------------------+
 * CFI shadow |  invalid  |    sv1    |    sv2    |                       invalid                         |
 * +-----------------------------------------------------------------------------------------------------+
 * sv1 = (0x80000 - 0x42000 + 0 * LIBRARY_ALIGNMENT) / 4096 + 2 = 64
 * sv2 = (0x80000 - 0x42000 + 1 * LIBRARY_ALIGNMENT) / 4096 + 2 = 128
 *
 * Calculating the __cfi_check address is the reverse process:
 * - First locate the shadow value that covers the target addr.
 * - Then subtract (sv - 2) * 4096 from the end of that LIBRARY_ALIGNMENT range to get the __cfi_check addr.
 *
 * For the algorithm to work, the start addr of each DSO must be aligned to LIBRARY_ALIGNMENT. */
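
/* A worked example of the mapping above (illustrative only; it assumes LIBRARY_ALIGNMENT == 0x40000,
 * i.e. LIBRARY_ALIGNMENT_BITS == 18, as in the diagram):
 *
 *   Forward, performed while liba.so is loaded:
 *     AlignUp(0x42000, 0x40000) = 0x80000
 *     sv1 = (0x80000 - 0x42000 + 0 * 0x40000) / 0x1000 + 2 = 0x3E + 2 = 64    // covers [0x40000, 0x80000)
 *     sv2 = (0x80000 - 0x42000 + 1 * 0x40000) / 0x1000 + 2 = 0x7E + 2 = 128   // covers [0x80000, 0xC0000)
 *
 *   Reverse, performed by the slow path for a target addr such as 0x9ABCD in [0x80000, 0xC0000):
 *     sv          = shadow[0x9ABCD >> 18]                  = 128
 *     range_end   = AlignDown(0x9ABCD, 0x40000) + 0x40000  = 0xC0000
 *     __cfi_check = range_end - (sv - 2) * 0x1000          = 0xC0000 - 0x7E000 = 0x42000 */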

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define ALIGN_UP(a, b) (((a) + (b) - 1) & -(b))
#define ALIGN_DOWN(a, b) ((a) & -(b))
#if DL_FDPIC
#define LADDR(p, v) laddr((p), (v))
#else
#define LADDR(p, v) (void *)((p)->base + (v))
#endif

/* Function ptr for __cfi_check() */
typedef int (*cfi_check_t)(uint64_t, void *, void *);

static const uintptr_t shadow_granularity = LIBRARY_ALIGNMENT_BITS;
static const uintptr_t cfi_check_granularity = 12;
static const uintptr_t shadow_alignment = 1UL << shadow_granularity;
static uintptr_t shadow_size = 0;
/* Start addr of the CFI shadow */
static char *cfi_shadow_start = NULL;
/* List head of all the DSOs loaded by the process */
static struct dso *dso_list_head = NULL;
static struct dso *pldso = NULL;

/* Shadow value */
/* The related shadow value(s) will be set to `sv_invalid` when:
 * - initializing the CFI shadow.
 * - removing a dso. */
static const uint16_t sv_invalid = 0;
/* The related shadow value(s) will be set to `sv_uncheck` if:
 * - the DSO does not enable CFI Cross-DSO.
 * - the DSO enables CFI Cross-DSO but is larger than about 256M (the maximum distance a 16-bit shadow value
 *   can encode in 4096-byte units); the shadow values of the part beyond that limit are set to `sv_uncheck`. */
static const uint16_t sv_uncheck = 1;
/* If a DSO enables CFI Cross-DSO, its shadow values should be valid. Because of the definition of `sv_invalid`
 * and `sv_uncheck`, a valid shadow value is at least 2. */
static const uint16_t sv_valid_min = 2;

#if defined(__LP64__)
static const uintptr_t max_target_addr = 0xffffffffffff;
#else
static const uintptr_t max_target_addr = 0xffffffff;
#endif

/* Create the CFI shadow */
static int create_cfi_shadow(void);

/* Map dsos to the CFI shadow */
static int add_dso_to_cfi_shadow(struct dso *dso);
static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type);

/* Find the __cfi_check() of the target dso and call it */
void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr);
void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data);
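
/* For reference, when cross-DSO CFI is enabled (clang -fsanitize=cfi -fsanitize-cfi-cross-dso), the compiler
 * emits calls to the two entry points above at indirect call sites whose target may live in another DSO.
 * Conceptually the emitted check behaves like the sketch below (illustrative only; kCallSiteTypeId stands for
 * the type hash clang assigns to the call site and is not a symbol defined here):
 *
 *     if (target is not recognized by the caller's own CFI tables)
 *         __cfi_slowpath_diag(kCallSiteTypeId, (void *)target, diag_data);  // traps on failure
 *     target(...);                                                          // proceeds if the check passed
 */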

static inline uintptr_t addr_to_offset(uintptr_t addr, int bits)
{
    /* Convert addr to a CFI shadow offset.
     * Shift left 1 bit because the shadow value is uint16_t. */
    return (addr >> bits) << 1;
}
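
/* For example, with LIBRARY_ALIGNMENT_BITS == 18 (an assumption carried over from the example above),
 * addr_to_offset(0x9ABCD, 18) == (0x9ABCD >> 18) << 1 == 2 << 1 == 4: the byte offset of the third
 * uint16_t slot in the CFI shadow. */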

static struct symdef find_cfi_check_sym(struct dso *p)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    struct verinfo verinfo = { .s = "__cfi_check", .v = "", .use_vna_hash = false };
    struct sym_info_pair s_info_p = gnu_hash(verinfo.s);
    return find_sym_impl(p, &verinfo, s_info_p, 0, p->namespace);
}

static int is_addr_in_ldso(size_t a)
{
    size_t i = 0;
    if (DL_FDPIC) {
        i = count_syms(pldso);
        if (a - (size_t)pldso->funcdescs < i * sizeof(*pldso->funcdescs))
            return 1;
    }
    if (DL_FDPIC && pldso->loadmap) {
        for (i = 0; i < pldso->loadmap->nsegs; i++) {
            if (a - pldso->loadmap->segs[i].p_vaddr
                < pldso->loadmap->segs[i].p_memsz)
                return 1;
        }
    } else {
        Phdr *ph = pldso->phdr;
        size_t phcnt = pldso->phnum;
        size_t entsz = pldso->phentsize;
        size_t base = (size_t)pldso->base;
        for (; phcnt--; ph = (void *)((char *)ph + entsz)) {
            if (ph->p_type != PT_LOAD) continue;
            if (a - base - ph->p_vaddr < ph->p_memsz)
                return 1;
        }
        if (a - (size_t)pldso->map < pldso->map_len)
            return 0;
    }
    return 0;
}

static uintptr_t get_cfi_check_addr(uint16_t value, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    uintptr_t addr = (uintptr_t)func_ptr;
    uintptr_t aligned_addr = ALIGN_DOWN(addr, shadow_alignment) + shadow_alignment;
    uintptr_t cfi_check_func_addr = aligned_addr - ((uintptr_t)(value - sv_valid_min) << cfi_check_granularity);
#ifdef __arm__
    LD_LOGD("[CFI] [%{public}s] __arm__ defined!\n", __FUNCTION__);
    cfi_check_func_addr++;
#endif
    LD_LOGD("[CFI] [%{public}s] cfi_check_func_addr[%{public}p] in dso[%{public}s]\n",
        __FUNCTION__, cfi_check_func_addr, ((struct dso *)addr2dso((size_t)cfi_check_func_addr))->name);

    return cfi_check_func_addr;
}
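
/* Worked example for the non-arm case (still assuming shadow_alignment == 0x40000): with value == 128 and
 * func_ptr == (void *)0x9ABCD, aligned_addr == 0xC0000 and the returned address is
 * 0xC0000 - (128 - 2) * 0x1000 == 0x42000, matching the __cfi_check address in the module-level example. */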

static inline void cfi_slowpath_common(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    uint16_t value = sv_invalid;

    if (func_ptr == NULL) {
        return;
    }

#if defined(__aarch64__)
    LD_LOGD("[CFI] [%{public}s] __aarch64__ defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr & ((1ULL << 56) - 1);
#else
    LD_LOGD("[CFI] [%{public}s] __aarch64__ not defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr;
#endif

    /* Get the shadow value */
    uintptr_t offset = addr_to_offset(addr, shadow_granularity);

    if (cfi_shadow_start == NULL) {
        LD_LOGE("[CFI] [%{public}s] the cfi_shadow_start is null!\n", __FUNCTION__);
        __builtin_trap();
    }

    if (offset > shadow_size) {
        LD_LOGE("[CFI] set value to sv_invalid because offset(%{public}x) > shadow_size(%{public}x), "
            "addr:%{public}p lr:%{public}p.\n",
            offset, shadow_size, func_ptr, __builtin_return_address(0));
        value = sv_invalid;
    } else {
        value = *((uint16_t *)(cfi_shadow_start + offset));
    }
    LD_LOGD("[CFI] [%{public}s] called from %{public}s to %{public}s func_ptr:0x%{public}p shadow value:%{public}d "
        "diag_data:0x%{public}p call_site_type_id[%{public}p].\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr, value, diag_data, call_site_type_id);

    struct dso *dso = NULL;
    switch (value) {
        case sv_invalid:
            /* The ldso is an exception: it is loaded by the kernel and is not mapped to the CFI shadow.
             * Do not check it. */
            if (is_addr_in_ldso((size_t)func_ptr)) {
                LD_LOGI("[CFI] [%{public}s] uncheck for ldso\n", __FUNCTION__);
                return;
            }

            LD_LOGE("[CFI] Invalid shadow value of address:%{public}p, lr:%{public}p.\n",
                func_ptr, __builtin_return_address(0));

            dso = (struct dso *)addr2dso((size_t)__builtin_return_address(0));
            if (dso == NULL) {
                LD_LOGE("[CFI] [%{public}s] can not find matched dso of %{public}p !\n",
                    __FUNCTION__, __builtin_return_address(0));
                __builtin_trap();
            }
            LD_LOGD("[CFI] [%{public}s] dso name[%{public}s]!\n", __FUNCTION__, dso->name);

            struct symdef cfi_check_sym = find_cfi_check_sym(dso);
            if (!cfi_check_sym.sym) {
                LD_LOGE("[CFI] [%{public}s] can not find the __cfi_check in the dso!\n", __FUNCTION__);
                __builtin_trap();
            }
            LD_LOGD("[CFI] [%{public}s] cfi_check addr[%{public}p]!\n", __FUNCTION__,
                LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value));
            ((cfi_check_t)LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value))(call_site_type_id, func_ptr, diag_data);
            break;
        case sv_uncheck:
            break;
        default:
            ((cfi_check_t)get_cfi_check_addr(value, func_ptr))(call_site_type_id, func_ptr, diag_data);
            break;
    }

    return;
}

int init_cfi_shadow(struct dso *dso_list, struct dso *ldso)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    if (dso_list == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* Save the head node of the dso list */
    dso_list_head = dso_list;
    pldso = ldso;

    return map_dso_to_cfi_shadow(dso_list);
}

int map_dso_to_cfi_shadow(struct dso *dso)
{
    bool has_cfi_check = false;

    if (dso == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* If the CFI shadow does not exist, create it and map all the dsos and their dependents to it. */
    if (cfi_shadow_start == NULL) {
        /* Find the __cfi_check symbol in the dso list */
        for (struct dso *p = dso; p; p = p->next) {
            if (find_cfi_check_sym(p).sym) {
                LD_LOGD("[CFI] [%{public}s] find __cfi_check function in dso %{public}s!\n", __FUNCTION__, p->name);
                has_cfi_check = true;
                break;
            }
        }

        if (has_cfi_check) {
            if (create_cfi_shadow() == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] create cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }
            add_dso_to_cfi_shadow(dso_list_head);
            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
        }
    /* If the CFI shadow exists, map the current dso and its dependents to it. */
    } else {
        add_dso_to_cfi_shadow(dso);
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
    }

    return CFI_SUCCESS;
}

void unmap_dso_from_cfi_shadow(struct dso *dso)
{
    if (dso == NULL) {
        LD_LOGD("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return;
    }

    LD_LOGD("[CFI] [%{public}s] unmap dso %{public}s from shadow!\n", __FUNCTION__, dso->name);

    if (cfi_shadow_start == NULL)
        return;

    if (dso->map == 0 || dso->map_len == 0)
        return;

    if (dso->is_mapped_to_shadow == false)
        return;

    /* Set the dso's shadow values to invalid. */
    fill_shadow_value_to_shadow(dso->map, dso->map + dso->map_len, 0, sv_invalid);
    dso->is_mapped_to_shadow = false;
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");

    return;
}

static int create_cfi_shadow(void)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    /* The CFI shadow covers the whole usable address space: (max_target_addr >> shadow_granularity)
     * LIBRARY_ALIGNMENT ranges, each mapped to one uint16_t shadow value (hence the shift left by 1).
     * The size passed to mmap() must be page aligned, so shadow_size is aligned up to PAGE_SIZE. */
    shadow_size = ALIGN_UP(((max_target_addr >> shadow_granularity) << 1), PAGE_SIZE);

    uintptr_t *mmap_addr = mmap(NULL, shadow_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    if (mmap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    cfi_shadow_start = (char *)mmap_addr;
    LD_LOGD("[CFI] [%{public}s] the cfi_shadow_start addr is %{public}p!\n", __FUNCTION__, cfi_shadow_start);

    return CFI_SUCCESS;
}
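
/* Worked example for the sizing above (assuming LIBRARY_ALIGNMENT_BITS == 18 and PAGE_SIZE == 4096):
 * on LP64, shadow_size = ALIGN_UP((0xffffffffffff >> 18) << 1, 4096) = 0x80000000, i.e. 2 GiB of PROT_NONE,
 * MAP_NORESERVE address space; on 32-bit, ALIGN_UP((0xffffffff >> 18) << 1, 4096) = 0x8000 (32 KiB).
 * Pages only become readable once fill_shadow_value_to_shadow() populates them. */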

static int add_dso_to_cfi_shadow(struct dso *dso)
{
    LD_LOGD("[CFI] [%{public}s] start with %{public}s !\n", __FUNCTION__, dso->name);
    for (struct dso *p = dso; p; p = p->next) {
        LD_LOGD("[CFI] [%{public}s] adding %{public}s to cfi shadow!\n", __FUNCTION__, p->name);
        if (p->map == 0 || p->map_len == 0) {
            LD_LOGW("[CFI] [%{public}s] the dso has no data! map[%{public}p] map_len[0x%{public}x]\n",
                __FUNCTION__, p->map, p->map_len);
            continue;
        }

        if (p->is_mapped_to_shadow == true) {
            LD_LOGW("[CFI] [%{public}s] %{public}s is already in shadow!\n", __FUNCTION__, p->name);
            continue;
        }

        struct symdef cfi_check_sym = find_cfi_check_sym(p);
        /* If the dso doesn't have __cfi_check(), set its shadow values to unchecked. */
        if (!cfi_check_sym.sym) {
            LD_LOGD("[CFI] [%{public}s] %{public}s has no __cfi_check()!\n", __FUNCTION__, p->name);
            if (fill_shadow_value_to_shadow(p->map, p->map + p->map_len, 0, sv_uncheck) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add dso to cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }
        /* If the dso has __cfi_check(), set its shadow values to valid. */
        } else {
            LD_LOGD("[CFI] [%{public}s] %{public}s has __cfi_check()!\n", __FUNCTION__, p->name);
            uintptr_t end = p->map + p->map_len;
            uintptr_t cfi_check = LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value);

            if (cfi_check == 0) {
                LD_LOGE("[CFI] [%{public}s] %{public}s has null cfi_check func!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
            if (fill_shadow_value_to_shadow(p->map, end, cfi_check, sv_valid_min) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add %{public}s to cfi shadow failed!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
        }
        p->is_mapped_to_shadow = true;
        LD_LOGD("[CFI] [%{public}s] add %{public}s to cfi shadow succeed.\n", __FUNCTION__, p->name);
    }
    LD_LOGD("[CFI] [%{public}s] %{public}s done.\n", __FUNCTION__, dso->name);

    return CFI_SUCCESS;
}

static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type)
{
    LD_LOGD("[CFI] [%{public}s] begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
        __FUNCTION__, begin, end, cfi_check, type);

    /* To keep the CFI shadow update atomic, we create a temporary shadow, write the shadow values into it,
     * and then move it over the CFI shadow with mremap(). */
    begin = ALIGN_DOWN(MAX(begin, cfi_check), shadow_alignment);
    char *shadow_begin = cfi_shadow_start + addr_to_offset(begin, LIBRARY_ALIGNMENT_BITS);
    char *shadow_end = (char *)(((uint16_t *)(cfi_shadow_start + addr_to_offset(end - 1, LIBRARY_ALIGNMENT_BITS))) + 1);
    char *aligned_shadow_begin = (char *)ALIGN_DOWN((uintptr_t)shadow_begin, PAGE_SIZE);
    char *aligned_shadow_end = (char *)ALIGN_UP((uintptr_t)shadow_end, PAGE_SIZE);

    uint16_t tmp_shadow_size = aligned_shadow_end - aligned_shadow_begin;
    uint16_t offset_begin = shadow_begin - aligned_shadow_begin;
    uint16_t offset_end = shadow_end - aligned_shadow_begin;

    char *tmp_shadow_start = (char *)mmap(NULL, tmp_shadow_size,
        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (tmp_shadow_start == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] tmp_shadow_start is %{public}p\t tmp_shadow_size is 0x%{public}x!\n",
        __FUNCTION__, tmp_shadow_start, tmp_shadow_size);
    if (mprotect(aligned_shadow_begin, tmp_shadow_size, PROT_READ) == -1) {
        LD_LOGE("[CFI] [%{public}s] mprotect failed!\n", __FUNCTION__);
        /* Avoid leaking the temporary mapping on the error path. */
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }
    if (type == sv_valid_min) {
        // We need to copy the whole area because we will read the old values below.
        memcpy(tmp_shadow_start, aligned_shadow_begin, tmp_shadow_size);
    } else {
        memcpy(tmp_shadow_start, aligned_shadow_begin, offset_begin);
        memcpy(tmp_shadow_start + offset_end, shadow_end, aligned_shadow_end - shadow_end);
    }

    /* If the dso has __cfi_check(), calculate the valid shadow values */
    if (type == sv_valid_min) {
#ifdef __arm__
        uint16_t shadow_value_begin = ((begin + shadow_alignment - (cfi_check - 1))
            >> cfi_check_granularity) + sv_valid_min;
#else
        uint16_t shadow_value_begin = ((begin + shadow_alignment - cfi_check)
            >> cfi_check_granularity) + sv_valid_min;
#endif
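        /* Worked example for the non-arm branch (assuming shadow_alignment == 0x40000 and
         * cfi_check_granularity == 12): with begin == 0x40000 and cfi_check == 0x42000,
         * shadow_value_begin == ((0x40000 + 0x40000 - 0x42000) >> 12) + 2 == 64 and
         * shadow_value_step == 1 << (18 - 12) == 64, so successive ranges get 64, 128, 192, ...
         * until the 16-bit value wraps and the loop below falls back to sv_uncheck. */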
        LD_LOGD("[CFI] [%{public}s] shadow_value_begin is 0x%{public}x!\n", __FUNCTION__, shadow_value_begin);
        uint16_t shadow_value_step = 1 << (shadow_granularity - cfi_check_granularity);
        uint16_t shadow_value = shadow_value_begin;

        /* Set the shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            /* If a dso is larger than about 256M ( = max shadow value * 4096), the 16-bit shadow value
             * wraps around and the excess is not checked. */
            if (shadow_value < shadow_value_begin) {
                *shadow_addr = sv_uncheck;
                continue;
            }
            *shadow_addr = (*shadow_addr == sv_invalid) ? shadow_value : sv_uncheck;
            shadow_value += shadow_value_step;
        }
    /* In these cases, the shadow value is always sv_uncheck or sv_invalid */
    } else if (type == sv_uncheck || type == sv_invalid) {
        /* Set the shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            *shadow_addr = type;
        }
    } else {
        LD_LOGE("[CFI] [%{public}s] has error param!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    mprotect(tmp_shadow_start, tmp_shadow_size, PROT_READ);
    /* Remap the temporary shadow onto the CFI shadow. */
    uint16_t *mremap_addr = mremap(tmp_shadow_start, tmp_shadow_size, tmp_shadow_size,
        MREMAP_MAYMOVE | MREMAP_FIXED, aligned_shadow_begin);

    if (mremap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mremap failed!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] fill completed!\n", __FUNCTION__);
    return CFI_SUCCESS;
}

void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, NULL);
    return;
}

void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, diag_data);
    return;
}