// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

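/*
 * Find the instruction that was decoded at sec+offset, if any, via the
 * per-file instruction hash keyed on (section, offset).
 */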
struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
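
/*
 * Illustrative use of the iterators above (do_something() is made up):
 * walk every instruction belonging to @func, transparently following the
 * link into a GCC-generated .cold subfunction:
 *
 *	struct instruction *insn;
 *
 *	func_for_each_insn(file, func, insn)
 *		do_something(insn);
 */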

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"make_task_dead",
		"__module_put_and_exit",
		"complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_and_make_dead",
		"kunit_try_catch_throw",
		"xen_start_kernel",
		"cpu_bringup_and_idle",
		"stop_this_cpu",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we can
	 * not correctly determine insn->call_dest->sec (external symbols do
	 * not have a section).
	 */
	if (vmlinux && noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

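/*
 * Reachable CFI states are interned in the hash table above so that identical
 * states share a single allocation.  Both cficmp() and cfi_key() deliberately
 * skip the leading 'hash' hlist_node, so only the CFI payload is compared and
 * hashed.
 */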
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

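/*
 * Size the table at 2^max(10, ilog2(size)) hlist heads, backed by an
 * anonymous mapping.  Returns NULL on mmap failure.
 */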
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strncmp(sec->name, ".text..__x86.", 13))
			sec->noinstr = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->sh.sh_size - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	/*
	 * Search backwards from the end of the section, at most 10 bytes,
	 * which is assumed here to bound the length of the last instruction.
	 */
	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/*
		 * Populate reloc for 'key'.  Tail-call sites are flagged by
		 * setting STATIC_CALL_SITE_TAIL in the reloc addend.
		 */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_mcount_loc_sections(struct objtool_file *file)
{
	struct section *sec;
	unsigned long *loc;
	struct instruction *insn;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, sizeof(unsigned long), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {

		loc = (unsigned long *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned long));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned long),
					  R_X86_64_64,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

/*
 * Symbols that replace INSN_CALL_DYNAMIC: every (tail) call to such a symbol
 * will be added to the .retpoline_sites section.
 */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that replace INSN_RETURN: every (tail) call to such a symbol
 * will be added to the .return_sites section.
 */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists.  These are mostly ignored for validation purposes.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}

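/*
 * Sentinel meaning "we already searched and found no reloc", so that
 * insn_reloc() doesn't repeat the reloc lookup for the same instruction.
 */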
#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV with a function attribute
	 * so they need a little help: NOP out any KCOV calls from noinstr
	 * text.
	 */
	if (insn->sec->noinstr && sym->kcov) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
				       : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insns: RET; INT3, except we only have a single struct
			 * insn here.  Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (mcount && sym->fentry) {
		if (sibling)
			WARN_FUNC("Tail call to __fentry__ !?!?", insn->sec, insn->offset);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       arch_nop_insn(insn->len));

		insn->type = INSN_NOP;

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	/* Skip the non-text sections, especially .discard ones */
	if (add && insn->sec->text)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			/*
			 * This is a special case for retbleed_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction.  Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {
				/* internal sibling call (without reloc) */
				add_call_dest(file, insn, insn->jump_dest->func, true);
			}
		}
	}

	return 0;
}

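/*
 * Prefer a proper STT_FUNC symbol at @offset; fall back to any other symbol
 * (e.g. a non-function asm label) when no function is defined there.
 */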
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump label entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	if (special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive (8-byte entries): */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links: these help avoid
	 * much of the 'in between' code, which would otherwise confuse the
	 * search.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				insn->entry = 1;
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(hint->sp_offset);
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}

read_retpoline_hints(struct objtool_file * file)1931 static int read_retpoline_hints(struct objtool_file *file)
1932 {
1933 struct section *sec;
1934 struct instruction *insn;
1935 struct reloc *reloc;
1936
1937 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
1938 if (!sec)
1939 return 0;
1940
1941 list_for_each_entry(reloc, &sec->reloc_list, list) {
1942 if (reloc->sym->type != STT_SECTION) {
1943 WARN("unexpected relocation symbol type in %s", sec->name);
1944 return -1;
1945 }
1946
1947 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1948 if (!insn) {
1949 WARN("bad .discard.retpoline_safe entry");
1950 return -1;
1951 }
1952
1953 if (insn->type != INSN_JUMP_DYNAMIC &&
1954 insn->type != INSN_CALL_DYNAMIC &&
1955 insn->type != INSN_RETURN &&
1956 insn->type != INSN_NOP) {
1957 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
1958 insn->sec, insn->offset);
1959 return -1;
1960 }
1961
1962 insn->retpoline_safe = true;
1963 }
1964
1965 return 0;
1966 }
1967
read_instr_hints(struct objtool_file * file)1968 static int read_instr_hints(struct objtool_file *file)
1969 {
1970 struct section *sec;
1971 struct instruction *insn;
1972 struct reloc *reloc;
1973
1974 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
1975 if (!sec)
1976 return 0;
1977
1978 list_for_each_entry(reloc, &sec->reloc_list, list) {
1979 if (reloc->sym->type != STT_SECTION) {
1980 WARN("unexpected relocation symbol type in %s", sec->name);
1981 return -1;
1982 }
1983
1984 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1985 if (!insn) {
1986 WARN("bad .discard.instr_end entry");
1987 return -1;
1988 }
1989
1990 insn->instr--;
1991 }
1992
1993 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
1994 if (!sec)
1995 return 0;
1996
1997 list_for_each_entry(reloc, &sec->reloc_list, list) {
1998 if (reloc->sym->type != STT_SECTION) {
1999 WARN("unexpected relocation symbol type in %s", sec->name);
2000 return -1;
2001 }
2002
2003 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2004 if (!insn) {
2005 WARN("bad .discard.instr_begin entry");
2006 return -1;
2007 }
2008
2009 insn->instr++;
2010 }
2011
2012 return 0;
2013 }
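/*
 * Illustration (not from the source): the two loops above turn paired
 * begin/end instrumentation annotations into a per-instruction delta,
 * which validate_branch() later sums along every path:
 *
 *	insn A: .discard.instr_begin reloc  ->  insn->instr == +1
 *	   ...  (running sum > 0: calls to instrumented code allowed)
 *	insn B: .discard.instr_end reloc    ->  insn->instr == -1
 *
 * In .noinstr.text, a call made where the running sum is <= 0 trips the
 * "leaves .noinstr.text section" warning in validate_call().
 */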
2014
2015 static int read_intra_function_calls(struct objtool_file *file)
2016 {
2017 struct instruction *insn;
2018 struct section *sec;
2019 struct reloc *reloc;
2020
2021 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2022 if (!sec)
2023 return 0;
2024
2025 list_for_each_entry(reloc, &sec->reloc_list, list) {
2026 unsigned long dest_off;
2027
2028 if (reloc->sym->type != STT_SECTION) {
2029 WARN("unexpected relocation symbol type in %s",
2030 sec->name);
2031 return -1;
2032 }
2033
2034 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2035 if (!insn) {
2036 WARN("bad .discard.intra_function_call entry");
2037 return -1;
2038 }
2039
2040 if (insn->type != INSN_CALL) {
2041 WARN_FUNC("intra_function_call not a direct call",
2042 insn->sec, insn->offset);
2043 return -1;
2044 }
2045
2046 /*
2047 * Treat intra-function CALLs as JMPs, but with a stack_op.
2048 * See add_call_destinations(), which strips stack_ops from
2049 * normal CALLs.
2050 */
2051 insn->type = INSN_JUMP_UNCONDITIONAL;
2052
2053 dest_off = insn->offset + insn->len + insn->immediate;
2054 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2055 if (!insn->jump_dest) {
2056 WARN_FUNC("can't find call dest at %s+0x%lx",
2057 insn->sec, insn->offset,
2058 insn->sec->name, dest_off);
2059 return -1;
2060 }
2061 }
2062
2063 return 0;
2064 }
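/*
 * For illustration, the asm-side annotation that feeds this section,
 * roughly as defined in include/linux/objtool.h (exact spelling varies
 * by version):
 *
 *	.macro ANNOTATE_INTRA_FUNCTION_CALL
 *	.Lannotate_\@:
 *		.pushsection .discard.intra_function_calls;
 *		.long .Lannotate_\@;
 *		.popsection
 *	.endm
 *
 * Placing it before a "call 1f"-style call tells objtool the target is
 * inside the same function, so the call is re-typed as a jump above.
 */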
2065
2066 static int classify_symbols(struct objtool_file *file)
2067 {
2068 struct section *sec;
2069 struct symbol *func;
2070
2071 for_each_sec(file, sec) {
2072 list_for_each_entry(func, &sec->symbol_list, list) {
2073 if (func->bind != STB_GLOBAL)
2074 continue;
2075
2076 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2077 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2078 func->static_call_tramp = true;
2079
2080 if (arch_is_retpoline(func))
2081 func->retpoline_thunk = true;
2082
2083 if (arch_is_rethunk(func))
2084 func->return_thunk = true;
2085
2086 if (arch_is_embedded_insn(func))
2087 func->embedded_insn = true;
2088
2089 if (!strcmp(func->name, "__fentry__"))
2090 func->fentry = true;
2091
2092 if (!strncmp(func->name, "__sanitizer_cov_", 16))
2093 func->kcov = true;
2094 }
2095 }
2096
2097 return 0;
2098 }
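/*
 * Example symbol names the classifier above would match on x86-64
 * (illustrative; the prefixes come from the headers and arch hooks):
 *
 *	__SCT__tp_func_sched_switch	-> static_call_tramp
 *	__x86_indirect_thunk_rax	-> retpoline_thunk
 *	__x86_return_thunk		-> return_thunk
 *	__fentry__			-> fentry
 *	__sanitizer_cov_trace_pc	-> kcov
 */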
2099
2100 static void mark_rodata(struct objtool_file *file)
2101 {
2102 struct section *sec;
2103 bool found = false;
2104
2105 /*
2106 * Search for the following rodata sections, each of which can
2107 * potentially contain jump tables:
2108 *
2109 * - .rodata: can contain GCC switch tables
2110 * - .rodata.<func>: same, if -fdata-sections is being used
2111 * - .rodata..c_jump_table: contains C annotated jump tables
2112 *
2113 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2114 */
2115 for_each_sec(file, sec) {
2116 if (!strncmp(sec->name, ".rodata", 7) &&
2117 !strstr(sec->name, ".str1.")) {
2118 sec->rodata = true;
2119 found = true;
2120 }
2121 }
2122
2123 file->rodata = found;
2124 }
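/*
 * Example (illustrative): the ".rodata..c_jump_table" case above matches
 * computed-goto dispatch tables annotated in C, e.g. with the
 * __annotate_jump_table helper from include/linux/compiler.h, as the
 * BPF interpreter does:
 *
 *	static const void * const jumptable[256] __annotate_jump_table = {
 *		[0 ... 255] = &&default_label,
 *		...
 *	};
 */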
2125
2126 static int decode_sections(struct objtool_file *file)
2127 {
2128 int ret;
2129
2130 mark_rodata(file);
2131
2132 ret = decode_instructions(file);
2133 if (ret)
2134 return ret;
2135
2136 ret = add_dead_ends(file);
2137 if (ret)
2138 return ret;
2139
2140 add_ignores(file);
2141 add_uaccess_safe(file);
2142
2143 ret = add_ignore_alternatives(file);
2144 if (ret)
2145 return ret;
2146
2147 /*
2148 * Must be before add_{jump,call}_destinations().
2149 */
2150 ret = classify_symbols(file);
2151 if (ret)
2152 return ret;
2153
2154 /*
2155 * Must be before add_special_section_alts() as that depends on
2156 * jump_dest being set.
2157 */
2158 ret = add_jump_destinations(file);
2159 if (ret)
2160 return ret;
2161
2162 ret = add_special_section_alts(file);
2163 if (ret)
2164 return ret;
2165
2166 /*
2167 * Must be before add_call_destinations(); it changes INSN_CALL to
2168 * INSN_JUMP_UNCONDITIONAL.
2169 */
2170 ret = read_intra_function_calls(file);
2171 if (ret)
2172 return ret;
2173
2174 ret = add_call_destinations(file);
2175 if (ret)
2176 return ret;
2177
2178 ret = add_jump_table_alts(file);
2179 if (ret)
2180 return ret;
2181
2182 ret = read_unwind_hints(file);
2183 if (ret)
2184 return ret;
2185
2186 ret = read_retpoline_hints(file);
2187 if (ret)
2188 return ret;
2189
2190 ret = read_instr_hints(file);
2191 if (ret)
2192 return ret;
2193
2194 return 0;
2195 }
2196
2197 static bool is_special_call(struct instruction *insn)
2198 {
2199 if (insn->type == INSN_CALL) {
2200 struct symbol *dest = insn->call_dest;
2201
2202 if (!dest)
2203 return false;
2204
2205 if (dest->fentry || dest->embedded_insn)
2206 return true;
2207 }
2208
2209 return false;
2210 }
2211
2212 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2213 {
2214 struct cfi_state *cfi = &state->cfi;
2215 int i;
2216
2217 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2218 return true;
2219
2220 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2221 return true;
2222
2223 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2224 return true;
2225
2226 for (i = 0; i < CFI_NUM_REGS; i++) {
2227 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2228 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2229 return true;
2230 }
2231
2232 return false;
2233 }
2234
2235 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2236 int expected_offset)
2237 {
2238 return reg->base == CFI_CFA &&
2239 reg->offset == expected_offset;
2240 }
2241
2242 static bool has_valid_stack_frame(struct insn_state *state)
2243 {
2244 struct cfi_state *cfi = &state->cfi;
2245
2246 if (cfi->cfa.base == CFI_BP &&
2247 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2248 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2249 return true;
2250
2251 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2252 return true;
2253
2254 return false;
2255 }
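/*
 * Worked example (x86-64, assuming the usual initial state where the
 * CFA is %rsp + 8 and the return address sits at CFA - 8): after the
 * canonical prologue
 *
 *	push %rbp		// stack_size = 16, CFA = %rsp + 16
 *	mov  %rsp, %rbp		// CFA rebased to %rbp + 16
 *
 * the tracked register state is
 *
 *	regs[CFI_BP] = (CFI_CFA, -16)	// == -cfa->offset
 *	regs[CFI_RA] = (CFI_CFA, -8)	// == -cfa->offset + 8
 *
 * which is exactly what the two check_reg_frame_pos() tests accept.
 */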
2256
2257 static int update_cfi_state_regs(struct instruction *insn,
2258 struct cfi_state *cfi,
2259 struct stack_op *op)
2260 {
2261 struct cfi_reg *cfa = &cfi->cfa;
2262
2263 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2264 return 0;
2265
2266 /* push */
2267 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2268 cfa->offset += 8;
2269
2270 /* pop */
2271 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2272 cfa->offset -= 8;
2273
2274 /* add immediate to sp */
2275 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2276 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2277 cfa->offset -= op->src.offset;
2278
2279 return 0;
2280 }
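/*
 * Example (illustrative): inside an UNWIND_HINT_TYPE_REGS region with
 * CFA = %rsp + 0x10, only the CFA offset is tracked by the rules above:
 *
 *	push %rax		// cfa->offset: 0x10 -> 0x18
 *	pop  %rcx		// cfa->offset: 0x18 -> 0x10
 *	add  $0x10, %rsp	// cfa->offset: 0x10 -> 0x00
 *
 * Callee-saved register locations are deliberately left alone; the
 * pt_regs layout implied by the hint already describes them.
 */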
2281
2282 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2283 {
2284 if (arch_callee_saved_reg(reg) &&
2285 cfi->regs[reg].base == CFI_UNDEFINED) {
2286 cfi->regs[reg].base = base;
2287 cfi->regs[reg].offset = offset;
2288 }
2289 }
2290
2291 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2292 {
2293 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2294 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2295 }
2296
2297 /*
2298 * A note about DRAP stack alignment:
2299 *
2300 * GCC has the concept of a DRAP register, which is used to help keep track of
2301 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2302 * register. The typical DRAP pattern is:
2303 *
2304 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2305 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2306 * 41 ff 72 f8 pushq -0x8(%r10)
2307 * 55 push %rbp
2308 * 48 89 e5 mov %rsp,%rbp
2309 * (more pushes)
2310 * 41 52 push %r10
2311 * ...
2312 * 41 5a pop %r10
2313 * (more pops)
2314 * 5d pop %rbp
2315 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2316 * c3 retq
2317 *
2318 * There are some variations in the epilogues, like:
2319 *
2320 * 5b pop %rbx
2321 * 41 5a pop %r10
2322 * 41 5c pop %r12
2323 * 41 5d pop %r13
2324 * 41 5e pop %r14
2325 * c9 leaveq
2326 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2327 * c3 retq
2328 *
2329 * and:
2330 *
2331 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2332 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2333 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2334 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2335 * c9 leaveq
2336 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2337 * c3 retq
2338 *
2339 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2340 * restored beforehand:
2341 *
2342 * 41 55 push %r13
2343 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2344 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2345 * ...
2346 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2347 * 41 5d pop %r13
2348 * c3 retq
2349 */
2350 static int update_cfi_state(struct instruction *insn,
2351 struct instruction *next_insn,
2352 struct cfi_state *cfi, struct stack_op *op)
2353 {
2354 struct cfi_reg *cfa = &cfi->cfa;
2355 struct cfi_reg *regs = cfi->regs;
2356
2357 /* stack operations don't make sense with an undefined CFA */
2358 if (cfa->base == CFI_UNDEFINED) {
2359 if (insn->func) {
2360 WARN_FUNC("undefined stack state", insn->sec, insn->offset);
2361 return -1;
2362 }
2363 return 0;
2364 }
2365
2366 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2367 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2368 return update_cfi_state_regs(insn, cfi, op);
2369
2370 switch (op->dest.type) {
2371
2372 case OP_DEST_REG:
2373 switch (op->src.type) {
2374
2375 case OP_SRC_REG:
2376 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2377 cfa->base == CFI_SP &&
2378 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2379
2380 /* mov %rsp, %rbp */
2381 cfa->base = op->dest.reg;
2382 cfi->bp_scratch = false;
2383 }
2384
2385 else if (op->src.reg == CFI_SP &&
2386 op->dest.reg == CFI_BP && cfi->drap) {
2387
2388 /* drap: mov %rsp, %rbp */
2389 regs[CFI_BP].base = CFI_BP;
2390 regs[CFI_BP].offset = -cfi->stack_size;
2391 cfi->bp_scratch = false;
2392 }
2393
2394 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2395
2396 /*
2397 * mov %rsp, %reg
2398 *
2399 * This is needed for the rare case where GCC
2400 * does:
2401 *
2402 * mov %rsp, %rax
2403 * ...
2404 * mov %rax, %rsp
2405 */
2406 cfi->vals[op->dest.reg].base = CFI_CFA;
2407 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2408 }
2409
2410 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2411 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2412
2413 /*
2414 * mov %rbp, %rsp
2415 *
2416 * Restore the original stack pointer (Clang).
2417 */
2418 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2419 }
2420
2421 else if (op->dest.reg == cfa->base) {
2422
2423 /* mov %reg, %rsp */
2424 if (cfa->base == CFI_SP &&
2425 cfi->vals[op->src.reg].base == CFI_CFA) {
2426
2427 /*
2428 * This is needed for the rare case
2429 * where GCC does something dumb like:
2430 *
2431 * lea 0x8(%rsp), %rcx
2432 * ...
2433 * mov %rcx, %rsp
2434 */
2435 cfa->offset = -cfi->vals[op->src.reg].offset;
2436 cfi->stack_size = cfa->offset;
2437
2438 } else if (cfa->base == CFI_SP &&
2439 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2440 cfi->vals[op->src.reg].offset == cfa->offset) {
2441
2442 /*
2443 * Stack swizzle:
2444 *
2445 * 1: mov %rsp, (%[tos])
2446 * 2: mov %[tos], %rsp
2447 * ...
2448 * 3: pop %rsp
2449 *
2450 * Where:
2451 *
2452 * 1 - places a pointer to the previous
2453 * stack at the Top-of-Stack of the
2454 * new stack.
2455 *
2456 * 2 - switches to the new stack.
2457 *
2458 * 3 - pops the Top-of-Stack to restore
2459 * the original stack.
2460 *
2461 * Note: we set base to SP_INDIRECT
2462 * here and preserve offset. Therefore
2463 * when the unwinder reaches ToS it
2464 * will dereference SP and then add the
2465 * offset to find the next frame, IOW:
2466 * (%rsp) + offset.
2467 */
2468 cfa->base = CFI_SP_INDIRECT;
2469
2470 } else {
2471 cfa->base = CFI_UNDEFINED;
2472 cfa->offset = 0;
2473 }
2474 }
2475
2476 else if (op->dest.reg == CFI_SP &&
2477 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2478 cfi->vals[op->src.reg].offset == cfa->offset) {
2479
2480 /*
2481 * The same stack swizzle case 2) as above. But
2482 * because we can't change cfa->base, case 3)
2483 * will become a regular POP. Pretend we're a
2484 * PUSH so things don't go unbalanced.
2485 */
2486 cfi->stack_size += 8;
2487 }
2488
2489
2490 break;
2491
2492 case OP_SRC_ADD:
2493 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2494
2495 /* add imm, %rsp */
2496 cfi->stack_size -= op->src.offset;
2497 if (cfa->base == CFI_SP)
2498 cfa->offset -= op->src.offset;
2499 break;
2500 }
2501
2502 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2503
2504 /* lea disp(%rbp), %rsp */
2505 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2506 break;
2507 }
2508
2509 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2510
2511 /* drap: lea disp(%rsp), %drap */
2512 cfi->drap_reg = op->dest.reg;
2513
2514 /*
2515 * lea disp(%rsp), %reg
2516 *
2517 * This is needed for the rare case where GCC
2518 * does something dumb like:
2519 *
2520 * lea 0x8(%rsp), %rcx
2521 * ...
2522 * mov %rcx, %rsp
2523 */
2524 cfi->vals[op->dest.reg].base = CFI_CFA;
2525 cfi->vals[op->dest.reg].offset =
2526 -cfi->stack_size + op->src.offset;
2527
2528 break;
2529 }
2530
2531 if (cfi->drap && op->dest.reg == CFI_SP &&
2532 op->src.reg == cfi->drap_reg) {
2533
2534 /* drap: lea disp(%drap), %rsp */
2535 cfa->base = CFI_SP;
2536 cfa->offset = cfi->stack_size = -op->src.offset;
2537 cfi->drap_reg = CFI_UNDEFINED;
2538 cfi->drap = false;
2539 break;
2540 }
2541
2542 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
2543 WARN_FUNC("unsupported stack register modification",
2544 insn->sec, insn->offset);
2545 return -1;
2546 }
2547
2548 break;
2549
2550 case OP_SRC_AND:
2551 if (op->dest.reg != CFI_SP ||
2552 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
2553 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
2554 WARN_FUNC("unsupported stack pointer realignment",
2555 insn->sec, insn->offset);
2556 return -1;
2557 }
2558
2559 if (cfi->drap_reg != CFI_UNDEFINED) {
2560 /* drap: and imm, %rsp */
2561 cfa->base = cfi->drap_reg;
2562 cfa->offset = cfi->stack_size = 0;
2563 cfi->drap = true;
2564 }
2565
2566 /*
2567 * Older versions of GCC (4.8ish) realign the stack
2568 * without DRAP, with a frame pointer.
2569 */
2570
2571 break;
2572
2573 case OP_SRC_POP:
2574 case OP_SRC_POPF:
2575 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
2576
2577 /* pop %rsp; # restore from a stack swizzle */
2578 cfa->base = CFI_SP;
2579 break;
2580 }
2581
2582 if (!cfi->drap && op->dest.reg == cfa->base) {
2583
2584 /* pop %rbp */
2585 cfa->base = CFI_SP;
2586 }
2587
2588 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
2589 op->dest.reg == cfi->drap_reg &&
2590 cfi->drap_offset == -cfi->stack_size) {
2591
2592 /* drap: pop %drap */
2593 cfa->base = cfi->drap_reg;
2594 cfa->offset = 0;
2595 cfi->drap_offset = -1;
2596
2597 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
2598
2599 /* pop %reg */
2600 restore_reg(cfi, op->dest.reg);
2601 }
2602
2603 cfi->stack_size -= 8;
2604 if (cfa->base == CFI_SP)
2605 cfa->offset -= 8;
2606
2607 break;
2608
2609 case OP_SRC_REG_INDIRECT:
2610 if (!cfi->drap && op->dest.reg == cfa->base &&
2611 op->dest.reg == CFI_BP) {
2612
2613 /* mov disp(%rsp), %rbp */
2614 cfa->base = CFI_SP;
2615 cfa->offset = cfi->stack_size;
2616 }
2617
2618 if (cfi->drap && op->src.reg == CFI_BP &&
2619 op->src.offset == cfi->drap_offset) {
2620
2621 /* drap: mov disp(%rbp), %drap */
2622 cfa->base = cfi->drap_reg;
2623 cfa->offset = 0;
2624 cfi->drap_offset = -1;
2625 }
2626
2627 if (cfi->drap && op->src.reg == CFI_BP &&
2628 op->src.offset == regs[op->dest.reg].offset) {
2629
2630 /* drap: mov disp(%rbp), %reg */
2631 restore_reg(cfi, op->dest.reg);
2632
2633 } else if (op->src.reg == cfa->base &&
2634 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
2635
2636 /* mov disp(%rbp), %reg */
2637 /* mov disp(%rsp), %reg */
2638 restore_reg(cfi, op->dest.reg);
2639
2640 } else if (op->src.reg == CFI_SP &&
2641 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
2642
2643 /* mov disp(%rsp), %reg */
2644 restore_reg(cfi, op->dest.reg);
2645 }
2646
2647 break;
2648
2649 default:
2650 WARN_FUNC("unknown stack-related instruction",
2651 insn->sec, insn->offset);
2652 return -1;
2653 }
2654
2655 break;
2656
2657 case OP_DEST_PUSH:
2658 case OP_DEST_PUSHF:
2659 cfi->stack_size += 8;
2660 if (cfa->base == CFI_SP)
2661 cfa->offset += 8;
2662
2663 if (op->src.type != OP_SRC_REG)
2664 break;
2665
2666 if (cfi->drap) {
2667 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2668
2669 /* drap: push %drap */
2670 cfa->base = CFI_BP_INDIRECT;
2671 cfa->offset = -cfi->stack_size;
2672
2673 /* save drap so we know when to restore it */
2674 cfi->drap_offset = -cfi->stack_size;
2675
2676 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
2677
2678 /* drap: push %rbp */
2679 cfi->stack_size = 0;
2680
2681 } else {
2682
2683 /* drap: push %reg */
2684 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
2685 }
2686
2687 } else {
2688
2689 /* push %reg */
2690 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
2691 }
2692
2693 /* detect when asm code uses rbp as a scratch register */
2694 if (!no_fp && insn->func && op->src.reg == CFI_BP &&
2695 cfa->base != CFI_BP)
2696 cfi->bp_scratch = true;
2697 break;
2698
2699 case OP_DEST_REG_INDIRECT:
2700
2701 if (cfi->drap) {
2702 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
2703
2704 /* drap: mov %drap, disp(%rbp) */
2705 cfa->base = CFI_BP_INDIRECT;
2706 cfa->offset = op->dest.offset;
2707
2708 /* save drap offset so we know when to restore it */
2709 cfi->drap_offset = op->dest.offset;
2710 } else {
2711
2712 /* drap: mov reg, disp(%rbp) */
2713 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
2714 }
2715
2716 } else if (op->dest.reg == cfa->base) {
2717
2718 /* mov reg, disp(%rbp) */
2719 /* mov reg, disp(%rsp) */
2720 save_reg(cfi, op->src.reg, CFI_CFA,
2721 op->dest.offset - cfi->cfa.offset);
2722
2723 } else if (op->dest.reg == CFI_SP) {
2724
2725 /* mov reg, disp(%rsp) */
2726 save_reg(cfi, op->src.reg, CFI_CFA,
2727 op->dest.offset - cfi->stack_size);
2728
2729 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
2730
2731 /* mov %rsp, (%reg); # setup a stack swizzle. */
2732 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
2733 cfi->vals[op->dest.reg].offset = cfa->offset;
2734 }
2735
2736 break;
2737
2738 case OP_DEST_MEM:
2739 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
2740 WARN_FUNC("unknown stack-related memory operation",
2741 insn->sec, insn->offset);
2742 return -1;
2743 }
2744
2745 /* pop mem */
2746 cfi->stack_size -= 8;
2747 if (cfa->base == CFI_SP)
2748 cfa->offset -= 8;
2749
2750 break;
2751
2752 default:
2753 WARN_FUNC("unknown stack-related instruction",
2754 insn->sec, insn->offset);
2755 return -1;
2756 }
2757
2758 return 0;
2759 }
2760
2761 /*
2762 * The stack layouts of alternatives instructions can sometimes diverge when
2763 * they have stack modifications. That's fine as long as the potential stack
2764 * layouts don't conflict at any given potential instruction boundary.
2765 *
2766 * Flatten the CFIs of the different alternative code streams (both original
2767 * and replacement) into a single shared CFI array which can be used to detect
2768 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
2769 */
2770 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
2771 {
2772 struct cfi_state **alt_cfi;
2773 int group_off;
2774
2775 if (!insn->alt_group)
2776 return 0;
2777
2778 if (!insn->cfi) {
2779 WARN("CFI missing");
2780 return -1;
2781 }
2782
2783 alt_cfi = insn->alt_group->cfi;
2784 group_off = insn->offset - insn->alt_group->first_insn->offset;
2785
2786 if (!alt_cfi[group_off]) {
2787 alt_cfi[group_off] = insn->cfi;
2788 } else {
2789 if (cficmp(alt_cfi[group_off], insn->cfi)) {
2790 WARN_FUNC("stack layout conflict in alternatives",
2791 insn->sec, insn->offset);
2792 return -1;
2793 }
2794 }
2795
2796 return 0;
2797 }
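/*
 * Illustration (hypothetical offsets): for an alternative group that
 * spans 6 bytes, the original and replacement streams index one shared
 * array by byte offset from the start of the group:
 *
 *	orig:  insn @ +0, insn @ +3	-> alt_cfi[0], alt_cfi[3]
 *	repl:  insn @ +0, insn @ +2	-> alt_cfi[0], alt_cfi[2]
 *
 * The first stream to reach an offset records its CFI there; a later
 * stream arriving at the same offset with a different CFI is the
 * "stack layout conflict" warned about above.
 */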
2798
2799 static int handle_insn_ops(struct instruction *insn,
2800 struct instruction *next_insn,
2801 struct insn_state *state)
2802 {
2803 struct stack_op *op;
2804
2805 list_for_each_entry(op, &insn->stack_ops, list) {
2806
2807 if (update_cfi_state(insn, next_insn, &state->cfi, op))
2808 return 1;
2809
2810 if (!insn->alt_group)
2811 continue;
2812
2813 if (op->dest.type == OP_DEST_PUSHF) {
2814 if (!state->uaccess_stack) {
2815 state->uaccess_stack = 1;
2816 } else if (state->uaccess_stack >> 31) {
2817 WARN_FUNC("PUSHF stack exhausted",
2818 insn->sec, insn->offset);
2819 return 1;
2820 }
2821 state->uaccess_stack <<= 1;
2822 state->uaccess_stack |= state->uaccess;
2823 }
2824
2825 if (op->src.type == OP_SRC_POPF) {
2826 if (state->uaccess_stack) {
2827 state->uaccess = state->uaccess_stack & 1;
2828 state->uaccess_stack >>= 1;
2829 if (state->uaccess_stack == 1)
2830 state->uaccess_stack = 0;
2831 }
2832 }
2833 }
2834
2835 return 0;
2836 }
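/*
 * Worked example of the uaccess bit-stack above (binary, sentinel bit
 * included). Starting with uaccess = 0 and an empty stack:
 *
 *	pushf	stack: 0 -> 1 (sentinel) -> 0b10	// low bit saves uaccess=0
 *	stac	uaccess = 1
 *	pushf	stack: 0b10 -> 0b101			// low bit saves uaccess=1
 *	popf	uaccess = 1, stack: 0b10
 *	popf	uaccess = 0, stack: 1 -> 0		// bare sentinel; empty again
 *
 * The bit-31 test guards the shift: one more PUSHF would push the
 * sentinel off the top of the 32-bit stack.
 */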
2837
2838 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
2839 {
2840 struct cfi_state *cfi1 = insn->cfi;
2841 int i;
2842
2843 if (!cfi1) {
2844 WARN("CFI missing");
2845 return false;
2846 }
2847
2848 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
2849
2850 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
2851 insn->sec, insn->offset,
2852 cfi1->cfa.base, cfi1->cfa.offset,
2853 cfi2->cfa.base, cfi2->cfa.offset);
2854
2855 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
2856 for (i = 0; i < CFI_NUM_REGS; i++) {
2857 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
2858 sizeof(struct cfi_reg)))
2859 continue;
2860
2861 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
2862 insn->sec, insn->offset,
2863 i, cfi1->regs[i].base, cfi1->regs[i].offset,
2864 i, cfi2->regs[i].base, cfi2->regs[i].offset);
2865 break;
2866 }
2867
2868 } else if (cfi1->type != cfi2->type) {
2869
2870 WARN_FUNC("stack state mismatch: type1=%d type2=%d",
2871 insn->sec, insn->offset, cfi1->type, cfi2->type);
2872
2873 } else if (cfi1->drap != cfi2->drap ||
2874 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
2875 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
2876
2877 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
2878 insn->sec, insn->offset,
2879 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
2880 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
2881
2882 } else
2883 return true;
2884
2885 return false;
2886 }
2887
2888 static inline bool func_uaccess_safe(struct symbol *func)
2889 {
2890 if (func)
2891 return func->uaccess_safe;
2892
2893 return false;
2894 }
2895
2896 static inline const char *call_dest_name(struct instruction *insn)
2897 {
2898 if (insn->call_dest)
2899 return insn->call_dest->name;
2900
2901 return "{dynamic}";
2902 }
2903
2904 static inline bool noinstr_call_dest(struct symbol *func)
2905 {
2906 /*
2907 * We can't deal with indirect function calls at present;
2908 * assume they're instrumented.
2909 */
2910 if (!func)
2911 return false;
2912
2913 /*
2914 * If the symbol is from a noinstr section, we're good.
2915 */
2916 if (func->sec->noinstr)
2917 return true;
2918
2919 /*
2920 * The __ubsan_handle_*() calls are like WARN(), they only happen when
2921 * something 'BAD' happened. At the risk of taking the machine down,
2922 * let them proceed to get the message out.
2923 */
2924 if (!strncmp(func->name, "__ubsan_handle_", 15))
2925 return true;
2926
2927 return false;
2928 }
2929
2930 static int validate_call(struct instruction *insn, struct insn_state *state)
2931 {
2932 if (state->noinstr && state->instr <= 0 &&
2933 !noinstr_call_dest(insn->call_dest)) {
2934 WARN_FUNC("call to %s() leaves .noinstr.text section",
2935 insn->sec, insn->offset, call_dest_name(insn));
2936 return 1;
2937 }
2938
2939 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
2940 WARN_FUNC("call to %s() with UACCESS enabled",
2941 insn->sec, insn->offset, call_dest_name(insn));
2942 return 1;
2943 }
2944
2945 if (state->df) {
2946 WARN_FUNC("call to %s() with DF set",
2947 insn->sec, insn->offset, call_dest_name(insn));
2948 return 1;
2949 }
2950
2951 return 0;
2952 }
2953
2954 static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
2955 {
2956 if (has_modified_stack_frame(insn, state)) {
2957 WARN_FUNC("sibling call from callable instruction with modified stack frame",
2958 insn->sec, insn->offset);
2959 return 1;
2960 }
2961
2962 return validate_call(insn, state);
2963 }
2964
2965 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
2966 {
2967 if (state->noinstr && state->instr > 0) {
2968 WARN_FUNC("return with instrumentation enabled",
2969 insn->sec, insn->offset);
2970 return 1;
2971 }
2972
2973 if (state->uaccess && !func_uaccess_safe(func)) {
2974 WARN_FUNC("return with UACCESS enabled",
2975 insn->sec, insn->offset);
2976 return 1;
2977 }
2978
2979 if (!state->uaccess && func_uaccess_safe(func)) {
2980 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
2981 insn->sec, insn->offset);
2982 return 1;
2983 }
2984
2985 if (state->df) {
2986 WARN_FUNC("return with DF set",
2987 insn->sec, insn->offset);
2988 return 1;
2989 }
2990
2991 if (func && has_modified_stack_frame(insn, state)) {
2992 WARN_FUNC("return with modified stack frame",
2993 insn->sec, insn->offset);
2994 return 1;
2995 }
2996
2997 if (state->cfi.bp_scratch) {
2998 WARN_FUNC("BP used as a scratch register",
2999 insn->sec, insn->offset);
3000 return 1;
3001 }
3002
3003 return 0;
3004 }
3005
3006 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3007 struct instruction *insn)
3008 {
3009 struct alt_group *alt_group = insn->alt_group;
3010
3011 /*
3012 * Simulate the fact that alternatives are patched in-place. When the
3013 * end of a replacement alt_group is reached, redirect objtool flow to
3014 * the end of the original alt_group.
3015 */
3016 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
3017 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3018
3019 return next_insn_same_sec(file, insn);
3020 }
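/*
 * Illustration (hypothetical layout): for an alternative whose
 * replacement is shorter than the original,
 *
 *	orig:  O1 O2 O3		repl:  R1 R2
 *
 * validation enters R1 via the alt list, walks R1 -> R2 inside the
 * replacement, and on leaving R2 (the group's last_insn) resumes at the
 * instruction after O3, mirroring how the patched-in code executes.
 */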
3021
3022 /*
3023 * Follow the branch starting at the given instruction, and recursively follow
3024 * any other branches (jumps). Meanwhile, track the frame pointer state at
3025 * each instruction and validate all the rules described in
3026 * tools/objtool/Documentation/stack-validation.txt.
3027 */
3028 static int validate_branch(struct objtool_file *file, struct symbol *func,
3029 struct instruction *insn, struct insn_state state)
3030 {
3031 struct alternative *alt;
3032 struct instruction *next_insn, *prev_insn = NULL;
3033 struct section *sec;
3034 u8 visited;
3035 int ret;
3036
3037 sec = insn->sec;
3038
3039 while (1) {
3040 next_insn = next_insn_to_validate(file, insn);
3041
3042 if (file->c_file && func && insn->func && func != insn->func->pfunc) {
3043 WARN("%s() falls through to next function %s()",
3044 func->name, insn->func->name);
3045 return 1;
3046 }
3047
3048 if (func && insn->ignore) {
3049 WARN_FUNC("BUG: why am I validating an ignored function?",
3050 sec, insn->offset);
3051 return 1;
3052 }
3053
3054 visited = VISITED_BRANCH << state.uaccess;
3055 if (insn->visited & VISITED_BRANCH_MASK) {
3056 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3057 return 1;
3058
3059 if (insn->visited & visited)
3060 return 0;
3061 } else {
3062 nr_insns_visited++;
3063 }
3064
3065 if (state.noinstr)
3066 state.instr += insn->instr;
3067
3068 if (insn->hint) {
3069 if (insn->restore) {
3070 struct instruction *save_insn, *i;
3071
3072 i = insn;
3073 save_insn = NULL;
3074
3075 sym_for_each_insn_continue_reverse(file, func, i) {
3076 if (i->save) {
3077 save_insn = i;
3078 break;
3079 }
3080 }
3081
3082 if (!save_insn) {
3083 WARN_FUNC("no corresponding CFI save for CFI restore",
3084 sec, insn->offset);
3085 return 1;
3086 }
3087
3088 if (!save_insn->visited) {
3089 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
3090 sec, insn->offset);
3091 return 1;
3092 }
3093
3094 insn->cfi = save_insn->cfi;
3095 nr_cfi_reused++;
3096 }
3097
3098 state.cfi = *insn->cfi;
3099 } else {
3100 /* XXX track if we actually changed state.cfi */
3101
3102 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3103 insn->cfi = prev_insn->cfi;
3104 nr_cfi_reused++;
3105 } else {
3106 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3107 }
3108 }
3109
3110 insn->visited |= visited;
3111
3112 if (propagate_alt_cfi(file, insn))
3113 return 1;
3114
3115 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3116 bool skip_orig = false;
3117
3118 list_for_each_entry(alt, &insn->alts, list) {
3119 if (alt->skip_orig)
3120 skip_orig = true;
3121
3122 ret = validate_branch(file, func, alt->insn, state);
3123 if (ret) {
3124 if (backtrace)
3125 BT_FUNC("(alt)", insn);
3126 return ret;
3127 }
3128 }
3129
3130 if (skip_orig)
3131 return 0;
3132 }
3133
3134 if (handle_insn_ops(insn, next_insn, &state))
3135 return 1;
3136
3137 switch (insn->type) {
3138
3139 case INSN_RETURN:
3140 if (sls && !insn->retpoline_safe &&
3141 next_insn && next_insn->type != INSN_TRAP) {
3142 WARN_FUNC("missing int3 after ret",
3143 insn->sec, insn->offset);
3144 }
3145 return validate_return(func, insn, &state);
3146
3147 case INSN_CALL:
3148 case INSN_CALL_DYNAMIC:
3149 ret = validate_call(insn, &state);
3150 if (ret)
3151 return ret;
3152
3153 if (!no_fp && func && !is_special_call(insn) &&
3154 !has_valid_stack_frame(&state)) {
3155 WARN_FUNC("call without frame pointer save/setup",
3156 sec, insn->offset);
3157 return 1;
3158 }
3159
3160 if (dead_end_function(file, insn->call_dest))
3161 return 0;
3162
3163 break;
3164
3165 case INSN_JUMP_CONDITIONAL:
3166 case INSN_JUMP_UNCONDITIONAL:
3167 if (is_sibling_call(insn)) {
3168 ret = validate_sibling_call(insn, &state);
3169 if (ret)
3170 return ret;
3171
3172 } else if (insn->jump_dest) {
3173 ret = validate_branch(file, func,
3174 insn->jump_dest, state);
3175 if (ret) {
3176 if (backtrace)
3177 BT_FUNC("(branch)", insn);
3178 return ret;
3179 }
3180 }
3181
3182 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3183 return 0;
3184
3185 break;
3186
3187 case INSN_JUMP_DYNAMIC:
3188 if (sls && !insn->retpoline_safe &&
3189 next_insn && next_insn->type != INSN_TRAP) {
3190 WARN_FUNC("missing int3 after indirect jump",
3191 insn->sec, insn->offset);
3192 }
3193
3194 /* fallthrough */
3195 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3196 if (is_sibling_call(insn)) {
3197 ret = validate_sibling_call(insn, &state);
3198 if (ret)
3199 return ret;
3200 }
3201
3202 if (insn->type == INSN_JUMP_DYNAMIC)
3203 return 0;
3204
3205 break;
3206
3207 case INSN_CONTEXT_SWITCH:
3208 if (func && (!next_insn || !next_insn->hint)) {
3209 WARN_FUNC("unsupported instruction in callable function",
3210 sec, insn->offset);
3211 return 1;
3212 }
3213 return 0;
3214
3215 case INSN_STAC:
3216 if (state.uaccess) {
3217 WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
3218 return 1;
3219 }
3220
3221 state.uaccess = true;
3222 break;
3223
3224 case INSN_CLAC:
3225 if (!state.uaccess && func) {
3226 WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
3227 return 1;
3228 }
3229
3230 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3231 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
3232 return 1;
3233 }
3234
3235 state.uaccess = false;
3236 break;
3237
3238 case INSN_STD:
3239 if (state.df) {
3240 WARN_FUNC("recursive STD", sec, insn->offset);
3241 return 1;
3242 }
3243
3244 state.df = true;
3245 break;
3246
3247 case INSN_CLD:
3248 if (!state.df && func) {
3249 WARN_FUNC("redundant CLD", sec, insn->offset);
3250 return 1;
3251 }
3252
3253 state.df = false;
3254 break;
3255
3256 default:
3257 break;
3258 }
3259
3260 if (insn->dead_end)
3261 return 0;
3262
3263 if (!next_insn) {
3264 if (state.cfi.cfa.base == CFI_UNDEFINED)
3265 return 0;
3266 WARN("%s: unexpected end of section", sec->name);
3267 return 1;
3268 }
3269
3270 prev_insn = insn;
3271 insn = next_insn;
3272 }
3273
3274 return 0;
3275 }
3276
3277 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3278 {
3279 struct instruction *insn;
3280 struct insn_state state;
3281 int ret, warnings = 0;
3282
3283 if (!file->hints)
3284 return 0;
3285
3286 init_insn_state(&state, sec);
3287
3288 if (sec) {
3289 insn = find_insn(file, sec, 0);
3290 if (!insn)
3291 return 0;
3292 } else {
3293 insn = list_first_entry(&file->insn_list, typeof(*insn), list);
3294 }
3295
3296 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
3297 if (insn->hint && !insn->visited) {
3298 ret = validate_branch(file, insn->func, insn, state);
3299 if (ret && backtrace)
3300 BT_FUNC("<=== (hint)", insn);
3301 warnings += ret;
3302 }
3303
3304 insn = list_next_entry(insn, list);
3305 }
3306
3307 return warnings;
3308 }
3309
3310 /*
3311 * Validate rethunk entry constraint: must untrain RET before the first RET.
3312 *
3313 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
3314 * before an actual RET instruction.
3315 */
3316 static int validate_entry(struct objtool_file *file, struct instruction *insn)
3317 {
3318 struct instruction *next, *dest;
3319 int ret, warnings = 0;
3320
3321 for (;;) {
3322 next = next_insn_to_validate(file, insn);
3323
3324 if (insn->visited & VISITED_ENTRY)
3325 return 0;
3326
3327 insn->visited |= VISITED_ENTRY;
3328
3329 if (!insn->ignore_alts && !list_empty(&insn->alts)) {
3330 struct alternative *alt;
3331 bool skip_orig = false;
3332
3333 list_for_each_entry(alt, &insn->alts, list) {
3334 if (alt->skip_orig)
3335 skip_orig = true;
3336
3337 ret = validate_entry(file, alt->insn);
3338 if (ret) {
3339 if (backtrace)
3340 BT_FUNC("(alt)", insn);
3341 return ret;
3342 }
3343 }
3344
3345 if (skip_orig)
3346 return 0;
3347 }
3348
3349 switch (insn->type) {
3350
3351 case INSN_CALL_DYNAMIC:
3352 case INSN_JUMP_DYNAMIC:
3353 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3354 WARN_FUNC("early indirect call", insn->sec, insn->offset);
3355 return 1;
3356
3357 case INSN_JUMP_UNCONDITIONAL:
3358 case INSN_JUMP_CONDITIONAL:
3359 if (!is_sibling_call(insn)) {
3360 if (!insn->jump_dest) {
3361 WARN_FUNC("unresolved jump target after linking?!?",
3362 insn->sec, insn->offset);
3363 return -1;
3364 }
3365 ret = validate_entry(file, insn->jump_dest);
3366 if (ret) {
3367 if (backtrace) {
3368 BT_FUNC("(branch%s)", insn,
3369 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3370 }
3371 return ret;
3372 }
3373
3374 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3375 return 0;
3376
3377 break;
3378 }
3379
3380 /* fallthrough */
3381 case INSN_CALL:
3382 dest = find_insn(file, insn->call_dest->sec,
3383 insn->call_dest->offset);
3384 if (!dest) {
3385 WARN("Unresolved function after linking!?: %s",
3386 insn->call_dest->name);
3387 return -1;
3388 }
3389
3390 ret = validate_entry(file, dest);
3391 if (ret) {
3392 if (backtrace)
3393 BT_FUNC("(call)", insn);
3394 return ret;
3395 }
3396 /*
3397 * If a call returns without error, it must have seen UNTRAIN_RET.
3398 * Therefore any non-error return is a success.
3399 */
3400 return 0;
3401
3402 case INSN_RETURN:
3403 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
3404 return 1;
3405
3406 case INSN_NOP:
3407 if (insn->retpoline_safe)
3408 return 0;
3409 break;
3410
3411 default:
3412 break;
3413 }
3414
3415 if (!next) {
3416 WARN_FUNC("teh end!", insn->sec, insn->offset);
3417 return -1;
3418 }
3419 insn = next;
3420 }
3421
3422 return warnings;
3423 }
3424
3425 /*
3426 * Validate that all branches starting at instructions marked 'entry'
3427 * encounter UNRET_END before RET.
3428 */
3429 static int validate_unret(struct objtool_file *file)
3430 {
3431 struct instruction *insn;
3432 int ret, warnings = 0;
3433
3434 for_each_insn(file, insn) {
3435 if (!insn->entry)
3436 continue;
3437
3438 ret = validate_entry(file, insn);
3439 if (ret < 0) {
3440 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
3441 return ret;
3442 }
3443 warnings += ret;
3444 }
3445
3446 return warnings;
3447 }
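/*
 * Sketch (illustrative, x86 naming) of the shape validate_entry()
 * accepts; UNTRAIN_RET expands to, among other things, an
 * ANNOTATE_UNRET_END-marked instruction that shows up here as a
 * retpoline_safe NOP:
 *
 *	SYM_CODE_START(some_entry_point)
 *		UNWIND_HINT_ENTRY
 *		...
 *		UNTRAIN_RET	// must come first on every path
 *		...
 *		RET		// only now is a return acceptable
 */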
3448
3449 static int validate_retpoline(struct objtool_file *file)
3450 {
3451 struct instruction *insn;
3452 int warnings = 0;
3453
3454 for_each_insn(file, insn) {
3455 if (insn->type != INSN_JUMP_DYNAMIC &&
3456 insn->type != INSN_CALL_DYNAMIC &&
3457 insn->type != INSN_RETURN)
3458 continue;
3459
3460 if (insn->retpoline_safe)
3461 continue;
3462
3463 /*
3464 * .init.text code is run before userspace and thus doesn't
3465 * strictly need retpolines, except for modules, which are
3466 * loaded late; those very much do need retpolines in their
3467 * .init.text.
3468 */
3469 if (!strcmp(insn->sec->name, ".init.text") && !module)
3470 continue;
3471
3472 if (insn->type == INSN_RETURN) {
3473 if (rethunk) {
3474 WARN_FUNC("'naked' return found in RETHUNK build",
3475 insn->sec, insn->offset);
3476 } else
3477 continue;
3478 } else {
3479 WARN_FUNC("indirect %s found in RETPOLINE build",
3480 insn->sec, insn->offset,
3481 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3482 }
3483
3484 warnings++;
3485 }
3486
3487 return warnings;
3488 }
3489
3490 static bool is_kasan_insn(struct instruction *insn)
3491 {
3492 return (insn->type == INSN_CALL &&
3493 !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
3494 }
3495
3496 static bool is_ubsan_insn(struct instruction *insn)
3497 {
3498 return (insn->type == INSN_CALL &&
3499 !strcmp(insn->call_dest->name,
3500 "__ubsan_handle_builtin_unreachable"));
3501 }
3502
3503 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
3504 {
3505 int i;
3506 struct instruction *prev_insn;
3507
3508 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
3509 return true;
3510
3511 /*
3512 * Ignore any unused exceptions. This can happen when a whitelisted
3513 * function has an exception table entry.
3514 *
3515 * Also ignore alternative replacement instructions. This can happen
3516 * when a whitelisted function uses one of the ALTERNATIVE macros.
3517 */
3518 if (!strcmp(insn->sec->name, ".fixup") ||
3519 !strcmp(insn->sec->name, ".altinstr_replacement") ||
3520 !strcmp(insn->sec->name, ".altinstr_aux"))
3521 return true;
3522
3523 if (!insn->func)
3524 return false;
3525
3526 /*
3527 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
3528 * __builtin_unreachable(). The BUG() macro has an unreachable() after
3529 * the UD2, which causes GCC's undefined trap logic to emit another UD2
3530 * (or occasionally a JMP to UD2).
3531 *
3532 * It may also insert a UD2 after calling a __noreturn function.
3533 */
3534 prev_insn = list_prev_entry(insn, list);
3535 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
3536 (insn->type == INSN_BUG ||
3537 (insn->type == INSN_JUMP_UNCONDITIONAL &&
3538 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
3539 return true;
3540
3541 /*
3542 * Check if this (or a subsequent) instruction is related to
3543 * CONFIG_UBSAN or CONFIG_KASAN.
3544 *
3545 * End the search at 5 instructions to avoid going into the weeds.
3546 */
3547 for (i = 0; i < 5; i++) {
3548
3549 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
3550 return true;
3551
3552 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
3553 if (insn->jump_dest &&
3554 insn->jump_dest->func == insn->func) {
3555 insn = insn->jump_dest;
3556 continue;
3557 }
3558
3559 break;
3560 }
3561
3562 if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
3563 break;
3564
3565 insn = list_next_entry(insn, list);
3566 }
3567
3568 return false;
3569 }
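/*
 * Example (illustrative) of compiler output the __noreturn case above
 * excuses:
 *
 *	call panic	// __noreturn: control never comes back
 *	ud2		// GCC's unreachable trap, dead by construction
 *
 * The ud2 would otherwise be flagged by
 * validate_reachable_instructions() below.
 */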
3570
3571 static int validate_symbol(struct objtool_file *file, struct section *sec,
3572 struct symbol *sym, struct insn_state *state)
3573 {
3574 struct instruction *insn;
3575 int ret;
3576
3577 if (!sym->len) {
3578 WARN("%s() is missing an ELF size annotation", sym->name);
3579 return 1;
3580 }
3581
3582 if (sym->pfunc != sym || sym->alias != sym)
3583 return 0;
3584
3585 insn = find_insn(file, sec, sym->offset);
3586 if (!insn || insn->ignore || insn->visited)
3587 return 0;
3588
3589 state->uaccess = sym->uaccess_safe;
3590
3591 ret = validate_branch(file, insn->func, insn, *state);
3592 if (ret && backtrace)
3593 BT_FUNC("<=== (sym)", insn);
3594 return ret;
3595 }
3596
3597 static int validate_section(struct objtool_file *file, struct section *sec)
3598 {
3599 struct insn_state state;
3600 struct symbol *func;
3601 int warnings = 0;
3602
3603 list_for_each_entry(func, &sec->symbol_list, list) {
3604 if (func->type != STT_FUNC)
3605 continue;
3606
3607 init_insn_state(&state, sec);
3608 set_func_state(&state.cfi);
3609
3610 warnings += validate_symbol(file, sec, func, &state);
3611 }
3612
3613 return warnings;
3614 }
3615
3616 static int validate_vmlinux_functions(struct objtool_file *file)
3617 {
3618 struct section *sec;
3619 int warnings = 0;
3620
3621 sec = find_section_by_name(file->elf, ".noinstr.text");
3622 if (sec) {
3623 warnings += validate_section(file, sec);
3624 warnings += validate_unwind_hints(file, sec);
3625 }
3626
3627 sec = find_section_by_name(file->elf, ".entry.text");
3628 if (sec) {
3629 warnings += validate_section(file, sec);
3630 warnings += validate_unwind_hints(file, sec);
3631 }
3632
3633 return warnings;
3634 }
3635
3636 static int validate_functions(struct objtool_file *file)
3637 {
3638 struct section *sec;
3639 int warnings = 0;
3640
3641 for_each_sec(file, sec) {
3642 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
3643 continue;
3644
3645 warnings += validate_section(file, sec);
3646 }
3647
3648 return warnings;
3649 }
3650
3651 static int validate_reachable_instructions(struct objtool_file *file)
3652 {
3653 struct instruction *insn;
3654
3655 if (file->ignore_unreachables)
3656 return 0;
3657
3658 for_each_insn(file, insn) {
3659 if (insn->visited || ignore_unreachable_insn(file, insn))
3660 continue;
3661
3662 WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
3663 return 1;
3664 }
3665
3666 return 0;
3667 }
3668
3669 int check(struct objtool_file *file)
3670 {
3671 int ret, warnings = 0;
3672
3673 arch_initial_func_cfi_state(&initial_func_cfi);
3674 init_cfi_state(&init_cfi);
3675 init_cfi_state(&func_cfi);
3676 set_func_state(&func_cfi);
3677
3678 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
3679 goto out;
3680
3681 cfi_hash_add(&init_cfi);
3682 cfi_hash_add(&func_cfi);
3683
3684 ret = decode_sections(file);
3685 if (ret < 0)
3686 goto out;
3687
3688 warnings += ret;
3689
3690 if (list_empty(&file->insn_list))
3691 goto out;
3692
3693 if (vmlinux && !validate_dup) {
3694 ret = validate_vmlinux_functions(file);
3695 if (ret < 0)
3696 goto out;
3697
3698 warnings += ret;
3699 goto out;
3700 }
3701
3702 if (retpoline) {
3703 ret = validate_retpoline(file);
3704 if (ret < 0)
3705 return ret;
3706 warnings += ret;
3707 }
3708
3709 ret = validate_functions(file);
3710 if (ret < 0)
3711 goto out;
3712 warnings += ret;
3713
3714 ret = validate_unwind_hints(file, NULL);
3715 if (ret < 0)
3716 goto out;
3717 warnings += ret;
3718
3719 if (unret) {
3720 /*
3721 * Must be after validate_branch() and friends, it plays
3722 * further games with insn->visited.
3723 */
3724 ret = validate_unret(file);
3725 if (ret < 0)
3726 return ret;
3727 warnings += ret;
3728 }
3729
3730 if (!warnings) {
3731 ret = validate_reachable_instructions(file);
3732 if (ret < 0)
3733 goto out;
3734 warnings += ret;
3735 }
3736
3737 ret = create_static_call_sections(file);
3738 if (ret < 0)
3739 goto out;
3740 warnings += ret;
3741
3742 if (retpoline) {
3743 ret = create_retpoline_sites_sections(file);
3744 if (ret < 0)
3745 goto out;
3746 warnings += ret;
3747 }
3748
3749 if (rethunk) {
3750 ret = create_return_sites_sections(file);
3751 if (ret < 0)
3752 goto out;
3753 warnings += ret;
3754 }
3755
3756 if (mcount) {
3757 ret = create_mcount_loc_sections(file);
3758 if (ret < 0)
3759 goto out;
3760 warnings += ret;
3761 }
3762
3763 if (stats) {
3764 printf("nr_insns_visited: %ld\n", nr_insns_visited);
3765 printf("nr_cfi: %ld\n", nr_cfi);
3766 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
3767 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
3768 }
3769
3770 out:
3771 /*
3772 * For now, don't fail the kernel build on fatal warnings. These
3773 * errors are still fairly common due to the growing matrix of
3774 * supported toolchains and their recent pace of change.
3775 */
3776 return 0;
3777 }
3778