// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <sys/mman.h>

#include "builtin.h"
#include "cfi.h"
#include "arch.h"
#include "check.h"
#include "special.h"
#include "warn.h"
#include "arch_elf.h"

#include <linux/objtool.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

struct alternative {
	struct list_head list;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

static struct instruction *next_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);

	if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
		return NULL;

	return next;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = list_next_entry(insn, list);
	struct symbol *func = insn->func;

	if (!func)
		return NULL;

	if (&next->list != &file->insn_list && next->func == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = list_prev_entry(insn, list);

	if (&prev->list != &file->insn_list && prev->func == insn->func)
		return prev;

	return NULL;
}

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && &insn->list != &file->insn_list &&			\
	     insn->sec == sym->sec &&					\
	     insn->offset < sym->offset + sym->len;			\
	     insn = list_next_entry(insn, list))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = list_prev_entry(insn, list);			\
	     &insn->list != &file->insn_list &&				\
	     insn->sec == sym->sec && insn->offset >= sym->offset;	\
	     insn = list_prev_entry(insn, list))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
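
/*
 * A minimal usage sketch of the iterators above (hypothetical caller;
 * 'file' and 'func' would come from the normal ELF walk):
 *
 *	struct instruction *insn;
 *
 *	func_for_each_insn(file, func, insn) {
 *		if (insn->type == INSN_RETURN)
 *			...;
 *	}
 *
 * func_for_each_insn() follows GCC 8+ ".cold" subfunction links via
 * next_insn_same_func(), while sym_for_each_insn() stays strictly within
 * the symbol's [offset, offset + len) range.
 */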

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn->jump_table)
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       alt_group->orig_group->first_insn->jump_table;
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only ELF functions can make sibling calls.  This ensures
	 * sibling call detection consistency between vmlinux.o and individual
	 * objects.
	 */
	if (!insn->func)
		return false;

	/* An indirect jump is either a sibling call or a jump to a table. */
	if (insn->type == INSN_JUMP_DYNAMIC)
		return !is_jump_table_jump(insn);

	/* add_jump_destinations() sets insn->call_dest for sibling calls. */
	return (is_static_jump(insn) && insn->call_dest);
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data.
	 */
	static const char * const global_noreturns[] = {
		"__stack_chk_fail",
		"panic",
		"do_exit",
		"do_task_dead",
		"__module_put_and_exit",
		"complete_and_exit",
		"__reiserfs_panic",
		"lbug_with_loc",
		"fortify_panic",
		"usercopy_abort",
		"machine_real_restart",
		"rewind_stack_do_exit",
		"kunit_try_catch_throw",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn->func)
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return.  In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other.  This is a very
				 * rare case.  It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, dest->func, recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct insn_state *state, struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation, otherwise we
	 * cannot correctly determine insn->call_dest->sec (external symbols
	 * do not have a section).
	 */
	if (vmlinux && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}
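
/*
 * Both helpers above skip the leading hlist_node when comparing and hashing,
 * which assumes 'hash' is the *first* member of struct cfi_state.  A sketch
 * of the assumed layout (see cfi.h, where the member is marked "must be
 * first"):
 *
 *	struct cfi_state {
 *		struct hlist_node hash;		// must stay first
 *		...				// everything below is keyed
 *	};
 */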

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

static void *cfi_hash_alloc(void)
{
	cfi_bits = vmlinux ? ELF_HASH_BITS - 3 : 13;
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}
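
/*
 * Rough sizing, assuming ELF_HASH_BITS is 20 (as defined in objtool's
 * elf.h): for vmlinux, cfi_bits = 17, so the table is (1 << 17) hlist
 * heads, i.e. 1 MiB of pointers on 64-bit; for individual objects it's
 * (1 << 13) heads, i.e. 64 KiB.  MAP_ANON keeps it zero-initialized.
 */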

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		for (offset = 0; offset < sec->len; offset += insn->len) {
			insn = malloc(sizeof(*insn));
			if (!insn) {
				WARN("malloc failed");
				return -1;
			}
			memset(insn, 0, sizeof(*insn));
			INIT_LIST_HEAD(&insn->alts);
			INIT_LIST_HEAD(&insn->stack_ops);

			insn->sec = sec;
			insn->offset = offset;

			ret = arch_decode_instruction(file->elf, sec, offset,
						      sec->len - offset,
						      &insn->len, &insn->type,
						      &insn->immediate,
						      &insn->stack_ops);
			if (ret)
				goto err;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			list_add_tail(&insn->list, &file->insn_list);
			nr_insns++;
		}

		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn)
				insn->func = func;
		}
	}

	if (stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;

err:
	free(insn);
	return ret;
}

static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->len > 10) ? sec->len - 10 : 0;

	for (offset = sec->len - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * By default, "ud2" is a dead end unless otherwise annotated, because
	 * GCC 7 inserts it for certain divide-by-zero cases.
	 */
	for_each_insn(file, insn)
		if (insn->type == INSN_BUG)
			insn->dead_end = true;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported.  In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = list_prev_entry(insn, list);
		else if (reloc->addend == reloc->sym->sec->len) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%x",
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%x",
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}
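
/*
 * For context: the ".rela.discard.unreachable" entries parsed above are
 * emitted by the kernel's unreachable() annotation.  Roughly (simplified
 * from include/linux/compiler.h of the same era; details vary by kernel
 * version):
 *
 *	#define annotate_unreachable() ({				\
 *		asm volatile("%c0:\n\t"					\
 *			     ".pushsection .discard.unreachable\n\t"	\
 *			     ".long %c0b - .\n\t"			\
 *			     ".popsection\n\t" : : "i" (__COUNTER__));	\
 *	})
 *
 * i.e. each use records a PC-relative pointer to the annotated location,
 * which add_dead_ends() resolves back to an instruction.
 */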

static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn->call_dest->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address.  This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn->call_dest;
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
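
/*
 * The 'key' reloc above lands at offset 4 because of the layout of
 * struct static_call_site (include/linux/static_call_types.h):
 *
 *	struct static_call_site {
 *		s32 addr;	// PC-relative: the call/jmp instruction
 *		s32 key;	// PC-relative: the static_call_key (+ flags)
 *	};
 *
 * The low bits of 'key' carry flags, which is why the tail-call case is
 * encoded by adding STATIC_CALL_SITE_TAIL to the reloc addend.
 */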

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"check_memory_region",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines.  This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

#define NEGATIVE_RELOC	((void *)-1L)

static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	if (insn->reloc == NEGATIVE_RELOC)
		return NULL;

	if (!insn->reloc) {
		insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec,
						       insn->offset, insn->len);
		if (!insn->reloc) {
			insn->reloc = NEGATIVE_RELOC;
			return NULL;
		}
	}

	return insn->reloc;
}
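
/*
 * insn_reloc() memoizes its lookup: insn->reloc caches either the found
 * reloc or the NEGATIVE_RELOC sentinel for "looked up, nothing there", so
 * repeated queries for the same instruction never rescan the reloc list.
 * The same pattern in miniature (hypothetical names, for illustration):
 *
 *	if (cache == SENTINEL)
 *		return NULL;			// known-negative result
 *	if (!cache)
 *		cache = slow_lookup() ?: SENTINEL;
 *	return (cache == SENTINEL) ? NULL : cache;
 */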

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) {
		list_del(&op->list);
		free(op);
	}
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn->call_dest;

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction.  For now, don't
	 * annotate it.  (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV with a function attribute, so
	 * they need a little help: NOP out any KCOV calls from noinstr text.
	 */
	if (insn->sec->noinstr && sym->kcov) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
				       : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insns: RET; INT3, except we only have a single
			 * struct insn here.  Mark it retpoline_safe to avoid
			 * the SLS warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	/* Skip the non-text sections, especially .discard ones */
	if (add && insn->sec->text)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn->func) {
			/* internal or external sibling call (with reloc) */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		insn->jump_dest = find_insn(file, dest_sec, dest_off);
		if (!insn->jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case where an alt instruction
			 * jumps past the end of the section.  These are
			 * handled later in handle_group_alt().
			 */
			if (!strcmp(insn->sec->name, ".altinstr_replacement"))
				continue;

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction.  Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
				  insn->sec, insn->offset, dest_sec->name,
				  dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn->func && insn->jump_dest->func &&
		    insn->func != insn->jump_dest->func) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions.  This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent.  In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn->func->name, ".cold") &&
			    strstr(insn->jump_dest->func->name, ".cold")) {
				insn->func->cfunc = insn->jump_dest->func;
				insn->jump_dest->func->pfunc = insn->func;

			} else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
				   insn->jump_dest->offset == insn->jump_dest->func->offset) {
				/* internal sibling call (without reloc) */
				add_call_dest(file, insn, insn->jump_dest->func, true);
			}
		}
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn->call_dest) {
				WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset);
				return -1;
			}

			if (insn->func && insn->call_dest->type != STT_FUNC) {
				WARN_FUNC("unsupported call to non-function",
					  insn->sec, insn->offset);
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_FUNC("can't find call dest symbol at %s+0x%lx",
					  insn->sec, insn->offset,
					  reloc->sym->sec->name,
					  dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = malloc(sizeof(*orig_alt_group));
	if (!orig_alt_group) {
		WARN("malloc failed");
		return -1;
	}
	orig_alt_group->cfi = calloc(special_alt->orig_len,
				     sizeof(struct cfi_state *));
	if (!orig_alt_group->cfi) {
		WARN("calloc failed");
		return -1;
	}

	last_orig_insn = NULL;
	insn = orig_insn;
	sec_for_each_insn_from(file, insn) {
		if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
			break;

		insn->alt_group = orig_alt_group;
		last_orig_insn = insn;
	}
	orig_alt_group->orig_group = NULL;
	orig_alt_group->first_insn = orig_insn;
	orig_alt_group->last_insn = last_orig_insn;

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original.  This is needed to
		 * allow propagate_alt_cfi() to do its magic.  When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));
		INIT_LIST_HEAD(&nop->alts);
		INIT_LIST_HEAD(&nop->stack_ops);

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->func = orig_insn->func;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->func = orig_insn->func;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_FUNC("unsupported relocation in alternatives section",
				  insn->sec, insn->offset);
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len)
			insn->jump_dest = next_insn_same_sec(file, last_orig_insn);

		if (!insn->jump_dest) {
			WARN_FUNC("can't find alternative jump destination",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

	if (nop)
		list_add(&nop->list, &last_new_insn->list);
end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = nop ? : last_new_insn;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
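
/*
 * Shape of the result, informally: the original site and its replacement
 * end up as two alt_groups of equal effective length, sharing one CFI
 * array so propagate_alt_cfi() can merge stack states index by index:
 *
 *	orig:  [insn][insn][insn    ]	<- orig_alt_group
 *	new:   [insn][insn][fake nop]	<- new_alt_group (orig_group points
 *					   back at orig; cfi[] is shared)
 */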

/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type == INSN_NOP)
		return 0;

	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
		WARN_FUNC("unsupported instruction at jump label",
			  orig_insn->sec, orig_insn->offset);
		return -1;
	}

	*new_insn = list_next_entry(orig_insn, list);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime.  Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_FUNC("empty alternative entry",
					  orig_insn->sec, orig_insn->offset);
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		list_add_tail(&alt->list, &orig_insn->alts);

		list_del(&special_alt->list);
		free(special_alt);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn->func->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		list_add_tail(&alt->list, &insn->alts);
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_FUNC("can't find switch jump table",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links; these help avoid
	 * much of the 'in between' code, which could otherwise confuse us.
	 */
	for (;
	     insn && insn->func && insn->func->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}

/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->jump_table = reloc;
		}
	}
}

static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn->jump_table)
			continue;

		ret = add_jump_table(file, insn, insn->jump_table);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	int ret;

	if (!file->rodata)
		return 0;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->type != STT_FUNC)
				continue;

			mark_func_jump_tables(file, func);
			ret = add_func_jump_tables(file, func);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	state->stack_size = initial_func_cfi.cfa.offset;
}

static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec, *relocsec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	relocsec = sec->reloc;
	if (!relocsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->len % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				insn->entry = 1;
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_ENTRY) {
			hint->type = UNWIND_HINT_TYPE_CALL;
			insn->entry = 1;
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_FUNC("unsupported unwind_hint sp base reg %d",
				  insn->sec, insn->offset, hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = hint->sp_offset;
		cfi.type = hint->type;
		cfi.end = hint->end;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}

static int read_retpoline_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.retpoline_safe entry");
			return -1;
		}

		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop",
				  insn->sec, insn->offset);
			return -1;
		}

		insn->retpoline_safe = true;
	}

	return 0;
}

static int read_instr_hints(struct objtool_file *file)
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_end entry");
			return -1;
		}

		insn->instr--;
	}

	sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.instr_begin entry");
			return -1;
		}

		insn->instr++;
	}

	return 0;
}

static int read_intra_function_calls(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		unsigned long dest_off;

		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s",
			     sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.intra_function_call entry");
			return -1;
		}

		if (insn->type != INSN_CALL) {
			WARN_FUNC("intra_function_call not a direct call",
				  insn->sec, insn->offset);
			return -1;
		}

		/*
		 * Treat intra-function CALLs as JMPs, but with a stack_op.
		 * See add_call_destinations(), which strips stack_ops from
		 * normal CALLs.
		 */
		insn->type = INSN_JUMP_UNCONDITIONAL;

		dest_off = insn->offset + insn->len + insn->immediate;
		insn->jump_dest = find_insn(file, insn->sec, dest_off);
		if (!insn->jump_dest) {
			WARN_FUNC("can't find call dest at %s+0x%lx",
				  insn->sec, insn->offset,
				  insn->sec->name, dest_off);
			return -1;
		}
	}

	return 0;
}
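
/*
 * For context, these entries come from the kernel's
 * ANNOTATE_INTRA_FUNCTION_CALL annotation (include/linux/objtool.h),
 * roughly:
 *
 *	.macro ANNOTATE_INTRA_FUNCTION_CALL
 *	999:
 *		.pushsection .discard.intra_function_calls
 *		.long 999b
 *		.popsection
 *	.endm
 *
 * (sketch only; the exact label scheme varies between kernel versions)
 */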

static int classify_symbols(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;

	for_each_sec(file, sec) {
		list_for_each_entry(func, &sec->symbol_list, list) {
			if (func->bind != STB_GLOBAL)
				continue;

			if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
				     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
				func->static_call_tramp = true;

			if (arch_is_retpoline(func))
				func->retpoline_thunk = true;

			if (arch_is_rethunk(func))
				func->return_thunk = true;

			if (!strcmp(func->name, "__fentry__"))
				func->fentry = true;

			if (!strncmp(func->name, "__sanitizer_cov_", 16))
				func->kcov = true;
		}
	}

	return 0;
}

static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .rodata..c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if (!strncmp(sec->name, ".rodata", 7) &&
		    !strstr(sec->name, ".str1.")) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}
1991
decode_sections(struct objtool_file * file)1992 static int decode_sections(struct objtool_file *file)
1993 {
1994 int ret;
1995
1996 mark_rodata(file);
1997
1998 ret = decode_instructions(file);
1999 if (ret)
2000 return ret;
2001
2002 ret = add_dead_ends(file);
2003 if (ret)
2004 return ret;
2005
2006 add_ignores(file);
2007 add_uaccess_safe(file);
2008
2009 ret = add_ignore_alternatives(file);
2010 if (ret)
2011 return ret;
2012
	/*
	 * Must be before add_jump_destinations() and add_call_destinations().
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_special_section_alts() as that depends on
	 * jump_dest being set.
	 */
	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	ret = add_special_section_alts(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destinations(); it changes INSN_CALL to
	 * INSN_JUMP_UNCONDITIONAL.
	 */
	ret = read_intra_function_calls(file);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	ret = read_retpoline_hints(file);
	if (ret)
		return ret;

	ret = read_instr_hints(file);
	if (ret)
		return ret;

	return 0;
}

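/*
 * With CONFIG_FUNCTION_TRACER and -mfentry, the compiler emits a
 * "call __fentry__" as the first instruction of each traceable function,
 * before any frame setup. That's why fentry calls are exempted from the
 * "call without frame pointer save/setup" check in validate_branch().
 */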
static bool is_fentry_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL &&
	    insn->call_dest &&
	    insn->call_dest->fentry)
		return true;

	return false;
}

static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}

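/*
 * A valid frame here is the canonical "push %rbp; mov %rsp, %rbp" setup
 * (a sketch of the resulting layout, using standard x86-64 CFA conventions):
 *
 *	CFA-8:  return address
 *	CFA-16: saved %rbp	<-- %rbp points here
 */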
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP && cfi->regs[CFI_BP].base == CFI_CFA &&
	    cfi->regs[CFI_BP].offset == -16)
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}

static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
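
/*
 * For example (a sketch): if the CFA is rsp+64, a push moves it to rsp+72,
 * the matching pop moves it back to rsp+64, and "add $0x10, %rsp" shrinks
 * the offset by 16.
 */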

static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}

static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
	cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
	cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
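
/*
 * Note that save_reg() only records the first save of each callee-saved
 * register: e.g. the "push %rbp" of a frame-pointer prologue records %rbp
 * at CFA-16, and any later scratch push of %rbp leaves that record alone.
 * restore_reg() resets a register to its architecture-defined entry state.
 */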

/*
 * A note about DRAP stack alignment:
 *
 * GCC has the concept of a DRAP register, which is used to help keep track of
 * the stack pointer when aligning the stack.  r10 or r13 is used as the DRAP
 * register.  The typical DRAP pattern is:
 *
 *   4c 8d 54 24 08		lea    0x8(%rsp),%r10
 *   48 83 e4 c0		and    $0xffffffffffffffc0,%rsp
 *   41 ff 72 f8		pushq  -0x8(%r10)
 *   55				push   %rbp
 *   48 89 e5			mov    %rsp,%rbp
 *				(more pushes)
 *   41 52			push   %r10
 *				...
 *   41 5a			pop    %r10
 *				(more pops)
 *   5d				pop    %rbp
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * There are some variations in the epilogues, like:
 *
 *   5b				pop    %rbx
 *   41 5a			pop    %r10
 *   41 5c			pop    %r12
 *   41 5d			pop    %r13
 *   41 5e			pop    %r14
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * and:
 *
 *   4c 8b 55 e8		mov    -0x18(%rbp),%r10
 *   48 8b 5d e0		mov    -0x20(%rbp),%rbx
 *   4c 8b 65 f0		mov    -0x10(%rbp),%r12
 *   4c 8b 6d f8		mov    -0x8(%rbp),%r13
 *   c9				leaveq
 *   49 8d 62 f8		lea    -0x8(%r10),%rsp
 *   c3				retq
 *
 * Sometimes r13 is used as the DRAP register, in which case it's saved and
 * restored beforehand:
 *
 *   41 55			push   %r13
 *   4c 8d 6c 24 10		lea    0x10(%rsp),%r13
 *   48 83 e4 f0		and    $0xfffffffffffffff0,%rsp
 *				...
 *   49 8d 65 f0		lea    -0x10(%r13),%rsp
 *   41 5d			pop    %r13
 *   c3				retq
 */
static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi,
			    struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn->func) {
			WARN_FUNC("undefined stack state", insn->sec, insn->offset);
			return -1;
		}
		return 0;
	}

	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    regs[CFI_BP].base == CFI_CFA &&
			    regs[CFI_BP].offset == -cfa->offset) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 cfa->base == CFI_BP) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else {
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset =
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			if (op->dest.reg == cfi->cfa.base) {
				WARN_FUNC("unsupported stack register modification",
					  insn->sec, insn->offset);
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_FUNC("unsupported stack pointer realignment",
					  insn->sec, insn->offset);
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (regs[op->dest.reg].offset == -cfi->stack_size) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_FUNC("unknown stack-related instruction",
				  insn->sec, insn->offset);
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (!no_fp && insn->func && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);
		}

		break;

	case OP_DEST_LEAVE:
		if ((!cfi->drap && cfa->base != CFI_BP) ||
		    (cfi->drap && cfa->base != cfi->drap_reg)) {
			WARN_FUNC("leave instruction with modified stack frame",
				  insn->sec, insn->offset);
			return -1;
		}

		/* leave (mov %rbp, %rsp; pop %rbp) */

		cfi->stack_size = -cfi->regs[CFI_BP].offset - 8;
		restore_reg(cfi, CFI_BP);

		if (!cfi->drap) {
			cfa->base = CFI_SP;
			cfa->offset -= 8;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_FUNC("unknown stack-related memory operation",
				  insn->sec, insn->offset);
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_FUNC("unknown stack-related instruction",
			  insn->sec, insn->offset);
		return -1;
	}

	return 0;
}
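
/*
 * A worked example (sketch) of the above for a plain frame-pointer
 * prologue, starting from the x86-64 entry state (cfa = rsp+8,
 * stack_size = 8):
 *
 *	push %rbp		stack_size = 16, cfa = rsp+16, %rbp at cfa-16
 *	mov  %rsp, %rbp		cfa base switches from %rsp to %rbp
 *	sub  $0x10, %rsp	stack_size = 32, cfa offset unchanged
 */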

/*
 * The stack layouts of the different alternative code streams can sometimes
 * diverge when they contain stack modifications. That's fine as long as the
 * potential stack layouts don't conflict at any given potential instruction
 * boundary.
 *
 * Flatten the CFIs of the different alternative code streams (both original
 * and replacement) into a single shared CFI array which can be used to detect
 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
 */
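
/*
 * For example (a sketch): the shared array is indexed by byte offset within
 * the alt group, so an original stream with a 1-byte instruction at offset 0
 * and a replacement with a 2-byte instruction there only have to agree at
 * offsets where both streams have an instruction boundary.
 */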
static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
{
	struct cfi_state **alt_cfi;
	int group_off;

	if (!insn->alt_group)
		return 0;

	if (!insn->cfi) {
		WARN("CFI missing");
		return -1;
	}

	alt_cfi = insn->alt_group->cfi;
	group_off = insn->offset - insn->alt_group->first_insn->offset;

	if (!alt_cfi[group_off]) {
		alt_cfi[group_off] = insn->cfi;
	} else {
		if (cficmp(alt_cfi[group_off], insn->cfi)) {
			WARN_FUNC("stack layout conflict in alternatives",
				  insn->sec, insn->offset);
			return -1;
		}
	}

	return 0;
}

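/*
 * The uaccess_stack used below is a bit stack seeded with a sentinel:
 * e.g. (a sketch) with uaccess enabled, the first PUSHF turns the initial
 * value 1 into 0b11; the matching POPF pops the low bit back into
 * state->uaccess, and once only the sentinel remains the stack is reset
 * to empty.
 */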
static int handle_insn_ops(struct instruction *insn, struct insn_state *state)
{
	struct stack_op *op;

	list_for_each_entry(op, &insn->stack_ops, list) {

		if (update_cfi_state(insn, &state->cfi, op))
			return 1;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				WARN_FUNC("PUSHF stack exhausted",
					  insn->sec, insn->offset);
				return 1;
			}
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}

static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
	struct cfi_state *cfi1 = insn->cfi;
	int i;

	if (!cfi1) {
		WARN("CFI missing");
		return false;
	}

	if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {

		WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
			  insn->sec, insn->offset,
			  cfi1->cfa.base, cfi1->cfa.offset,
			  cfi2->cfa.base, cfi2->cfa.offset);

	} else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
		for (i = 0; i < CFI_NUM_REGS; i++) {
			if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
				    sizeof(struct cfi_reg)))
				continue;

			WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
				  insn->sec, insn->offset,
				  i, cfi1->regs[i].base, cfi1->regs[i].offset,
				  i, cfi2->regs[i].base, cfi2->regs[i].offset);
			break;
		}

	} else if (cfi1->type != cfi2->type) {

		WARN_FUNC("stack state mismatch: type1=%d type2=%d",
			  insn->sec, insn->offset, cfi1->type, cfi2->type);

	} else if (cfi1->drap != cfi2->drap ||
		   (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
		   (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {

		WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
			  insn->sec, insn->offset,
			  cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
			  cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);

	} else
		return true;

	return false;
}

static inline bool func_uaccess_safe(struct symbol *func)
{
	if (func)
		return func->uaccess_safe;

	return false;
}

static inline const char *call_dest_name(struct instruction *insn)
{
	if (insn->call_dest)
		return insn->call_dest->name;

	return "{dynamic}";
}

static inline bool noinstr_call_dest(struct symbol *func)
{
	/*
	 * We can't deal with indirect function calls at present;
	 * assume they're instrumented.
	 */
	if (!func)
		return false;

	/*
	 * If the symbol is from a noinstr section, we're good.
	 */
	if (func->sec->noinstr)
		return true;

	/*
	 * The __ubsan_handle_*() calls are like WARN(), they only happen when
	 * something 'BAD' happened.  At the risk of taking the machine down,
	 * let them proceed to get the message out.
	 */
	if (!strncmp(func->name, "__ubsan_handle_", 15))
		return true;

	return false;
}

static int validate_call(struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(insn->call_dest)) {
		WARN_FUNC("call to %s() leaves .noinstr.text section",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
		WARN_FUNC("call to %s() with UACCESS enabled",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_FUNC("call to %s() with DF set",
			  insn->sec, insn->offset, call_dest_name(insn));
		return 1;
	}

	return 0;
}

static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
{
	if (has_modified_stack_frame(insn, state)) {
		WARN_FUNC("sibling call from callable instruction with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	return validate_call(insn, state);
}

static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_FUNC("return with instrumentation enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS enabled",
			  insn->sec, insn->offset);
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->df) {
		WARN_FUNC("return with DF set",
			  insn->sec, insn->offset);
		return 1;
	}

	if (func && has_modified_stack_frame(insn, state)) {
		WARN_FUNC("return with modified stack frame",
			  insn->sec, insn->offset);
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_FUNC("BP used as a scratch register",
			  insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 */
	if (alt_group && insn == alt_group->last_insn && alt_group->orig_group)
		return next_insn_same_sec(file, alt_group->orig_group->last_insn);

	return next_insn_same_sec(file, insn);
}

/*
 * Follow the branch starting at the given instruction, and recursively follow
 * any other branches (jumps).  Meanwhile, track the frame pointer state at
 * each instruction and validate all the rules described in
 * tools/objtool/Documentation/stack-validation.txt.
 */
static int validate_branch(struct objtool_file *file, struct symbol *func,
			   struct instruction *insn, struct insn_state state)
{
	struct alternative *alt;
	struct instruction *next_insn, *prev_insn = NULL;
	struct section *sec;
	u8 visited;
	int ret;

	sec = insn->sec;

	while (1) {
		next_insn = next_insn_to_validate(file, insn);

		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
			WARN("%s() falls through to next function %s()",
			     func->name, insn->func->name);
			return 1;
		}

		if (func && insn->ignore) {
			WARN_FUNC("BUG: why am I validating an ignored function?",
				  sec, insn->offset);
			return 1;
		}

		visited = VISITED_BRANCH << state.uaccess;
		if (insn->visited & VISITED_BRANCH_MASK) {
			if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
				return 1;

			if (insn->visited & visited)
				return 0;
		} else {
			nr_insns_visited++;
		}

		if (state.noinstr)
			state.instr += insn->instr;

		if (insn->hint) {
			if (insn->restore) {
				struct instruction *save_insn, *i;

				i = insn;
				save_insn = NULL;

				sym_for_each_insn_continue_reverse(file, func, i) {
					if (i->save) {
						save_insn = i;
						break;
					}
				}

				if (!save_insn) {
					WARN_FUNC("no corresponding CFI save for CFI restore",
						  sec, insn->offset);
					return 1;
				}

				if (!save_insn->visited) {
					WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
						  sec, insn->offset);
					return 1;
				}

				insn->cfi = save_insn->cfi;
				nr_cfi_reused++;
			}

			state.cfi = *insn->cfi;
		} else {
			/* XXX track if we actually changed state.cfi */

			if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
				insn->cfi = prev_insn->cfi;
				nr_cfi_reused++;
			} else {
				insn->cfi = cfi_hash_find_or_add(&state.cfi);
			}
		}

		insn->visited |= visited;

		if (propagate_alt_cfi(file, insn))
			return 1;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_branch(file, func, alt->insn, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		if (handle_insn_ops(insn, &state))
			return 1;

		switch (insn->type) {

		case INSN_RETURN:
			if (sls && !insn->retpoline_safe &&
			    next_insn && next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after ret",
					  insn->sec, insn->offset);
			}
			return validate_return(func, insn, &state);

		case INSN_CALL:
		case INSN_CALL_DYNAMIC:
			ret = validate_call(insn, &state);
			if (ret)
				return ret;

			if (!no_fp && func && !is_fentry_call(insn) &&
			    !has_valid_stack_frame(&state)) {
				WARN_FUNC("call without frame pointer save/setup",
					  sec, insn->offset);
				return 1;
			}

			if (dead_end_function(file, insn->call_dest))
				return 0;

			break;

		case INSN_JUMP_CONDITIONAL:
		case INSN_JUMP_UNCONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;

			} else if (insn->jump_dest) {
				ret = validate_branch(file, func,
						      insn->jump_dest, state);
				if (ret) {
					if (backtrace)
						BT_FUNC("(branch)", insn);
					return ret;
				}
			}

			if (insn->type == INSN_JUMP_UNCONDITIONAL)
				return 0;

			break;

		case INSN_JUMP_DYNAMIC:
			if (sls && !insn->retpoline_safe &&
			    next_insn && next_insn->type != INSN_TRAP) {
				WARN_FUNC("missing int3 after indirect jump",
					  insn->sec, insn->offset);
			}

			/* fallthrough */
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			if (is_sibling_call(insn)) {
				ret = validate_sibling_call(insn, &state);
				if (ret)
					return ret;
			}

			if (insn->type == INSN_JUMP_DYNAMIC)
				return 0;

			break;

		case INSN_CONTEXT_SWITCH:
			if (func && (!next_insn || !next_insn->hint)) {
				WARN_FUNC("unsupported instruction in callable function",
					  sec, insn->offset);
				return 1;
			}
			return 0;

		case INSN_STAC:
			if (state.uaccess) {
				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
				return 1;
			}

			state.uaccess = true;
			break;

		case INSN_CLAC:
			if (!state.uaccess && func) {
				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
				return 1;
			}

			if (func_uaccess_safe(func) && !state.uaccess_stack) {
				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
				return 1;
			}

			state.uaccess = false;
			break;

		case INSN_STD:
			if (state.df) {
				WARN_FUNC("recursive STD", sec, insn->offset);
				return 1;
			}

			state.df = true;
			break;

		case INSN_CLD:
			if (!state.df && func) {
				WARN_FUNC("redundant CLD", sec, insn->offset);
				return 1;
			}

			state.df = false;
			break;

		default:
			break;
		}

		if (insn->dead_end)
			return 0;

		if (!next_insn) {
			if (state.cfi.cfa.base == CFI_UNDEFINED)
				return 0;
			WARN("%s: unexpected end of section", sec->name);
			return 1;
		}

		prev_insn = insn;
		insn = next_insn;
	}

	return 0;
}

static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	struct insn_state state;
	int ret, warnings = 0;

	if (!file->hints)
		return 0;

	init_insn_state(&state, sec);

	if (sec) {
		insn = find_insn(file, sec, 0);
		if (!insn)
			return 0;
	} else {
		insn = list_first_entry(&file->insn_list, typeof(*insn), list);
	}

	while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) {
		if (insn->hint && !insn->visited) {
			ret = validate_branch(file, insn->func, insn, state);
			if (ret && backtrace)
				BT_FUNC("<=== (hint)", insn);
			warnings += ret;
		}

		insn = list_next_entry(insn, list);
	}

	return warnings;
}

/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes
 * before an actual RET instruction.
 */
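
/*
 * On x86, for example (a sketch), the expected shape at a function entry
 * point is the UNTRAIN_RET sequence (which carries ANNOTATE_UNRET_END)
 * ahead of any reachable RET:
 *
 *	UNTRAIN_RET
 *	...
 *	ret
 *
 * validate_entry() below walks every path from the entry point and
 * complains if a RET or an indirect branch is reachable first.
 */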
static int validate_entry(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret, warnings = 0;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		if (insn->visited & VISITED_ENTRY)
			return 0;

		insn->visited |= VISITED_ENTRY;

		if (!insn->ignore_alts && !list_empty(&insn->alts)) {
			struct alternative *alt;
			bool skip_orig = false;

			list_for_each_entry(alt, &insn->alts, list) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_entry(file, alt->insn);
				if (ret) {
					if (backtrace)
						BT_FUNC("(alt)", insn);
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			WARN_FUNC("early indirect call", insn->sec, insn->offset);
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_FUNC("unresolved jump target after linking?!?",
						  insn->sec, insn->offset);
					return -1;
				}
				ret = validate_entry(file, insn->jump_dest);
				if (ret) {
					if (backtrace) {
						BT_FUNC("(branch%s)", insn,
							insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					}
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				break;
			}

			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn->call_dest->sec,
					 insn->call_dest->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn->call_dest->name);
				return -1;
			}

			ret = validate_entry(file, dest);
			if (ret) {
				if (backtrace)
					BT_FUNC("(call)", insn);
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset);
			return 1;

		case INSN_NOP:
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		if (!next) {
			WARN_FUNC("the end!", insn->sec, insn->offset);
			return -1;
		}
		insn = next;
	}

	return warnings;
}

/*
 * Validate that all branches starting at 'insn->entry' encounter UNRET_END
 * before RET.
 */
static int validate_unret(struct objtool_file *file)
{
	struct instruction *insn;
	int ret, warnings = 0;

	for_each_insn(file, insn) {
		if (!insn->entry)
			continue;

		ret = validate_entry(file, insn);
		if (ret < 0) {
			WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset);
			return ret;
		}
		warnings += ret;
	}

	return warnings;
}

static int validate_retpoline(struct objtool_file *file)
{
	struct instruction *insn;
	int warnings = 0;

	for_each_insn(file, insn) {
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN)
			continue;

		if (insn->retpoline_safe)
			continue;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules: they are
		 * loaded late enough that their .init.text very much does
		 * need retpolines.
		 */
		if (!strcmp(insn->sec->name, ".init.text") && !module)
			continue;

		if (insn->type == INSN_RETURN) {
			if (rethunk) {
				WARN_FUNC("'naked' return found in RETHUNK build",
					  insn->sec, insn->offset);
			} else
				continue;
		} else {
			WARN_FUNC("indirect %s found in RETPOLINE build",
				  insn->sec, insn->offset,
				  insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
		}

		warnings++;
	}

	return warnings;
}

static bool is_kasan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name, "__asan_handle_no_return"));
}

static bool is_ubsan_insn(struct instruction *insn)
{
	return (insn->type == INSN_CALL &&
		!strcmp(insn->call_dest->name,
			"__ubsan_handle_builtin_unreachable"));
}

static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
	int i;
	struct instruction *prev_insn;

	if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
		return true;

	/*
	 * Ignore any unused exceptions.  This can happen when a whitelisted
	 * function has an exception table entry.
	 *
	 * Also ignore alternative replacement instructions.  This can happen
	 * when a whitelisted function uses one of the ALTERNATIVE macros.
	 */
	if (!strcmp(insn->sec->name, ".fixup") ||
	    !strcmp(insn->sec->name, ".altinstr_replacement") ||
	    !strcmp(insn->sec->name, ".altinstr_aux"))
		return true;

	if (!insn->func)
		return false;

	/*
	 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
	 * __builtin_unreachable().  The BUG() macro has an unreachable() after
	 * the UD2, which causes GCC's undefined trap logic to emit another UD2
	 * (or occasionally a JMP to UD2).
	 *
	 * It may also insert a UD2 after calling a __noreturn function.
	 */
	prev_insn = list_prev_entry(insn, list);
	if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) &&
	    (insn->type == INSN_BUG ||
	     (insn->type == INSN_JUMP_UNCONDITIONAL &&
	      insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
		return true;

	/*
	 * Check if this (or a subsequent) instruction is related to
	 * CONFIG_UBSAN or CONFIG_KASAN.
	 *
	 * End the search at 5 instructions to avoid going into the weeds.
	 */
	for (i = 0; i < 5; i++) {

		if (is_kasan_insn(insn) || is_ubsan_insn(insn))
			return true;

		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
			if (insn->jump_dest &&
			    insn->jump_dest->func == insn->func) {
				insn = insn->jump_dest;
				continue;
			}

			break;
		}

		if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
			break;

		insn = list_next_entry(insn, list);
	}

	return false;
}

static int validate_symbol(struct objtool_file *file, struct section *sec,
			   struct symbol *sym, struct insn_state *state)
{
	struct instruction *insn;
	int ret;

	if (!sym->len) {
		WARN("%s() is missing an ELF size annotation", sym->name);
		return 1;
	}

	if (sym->pfunc != sym || sym->alias != sym)
		return 0;

	insn = find_insn(file, sec, sym->offset);
	if (!insn || insn->ignore || insn->visited)
		return 0;

	state->uaccess = sym->uaccess_safe;

	ret = validate_branch(file, insn->func, insn, *state);
	if (ret && backtrace)
		BT_FUNC("<=== (sym)", insn);
	return ret;
}

static int validate_section(struct objtool_file *file, struct section *sec)
{
	struct insn_state state;
	struct symbol *func;
	int warnings = 0;

	list_for_each_entry(func, &sec->symbol_list, list) {
		if (func->type != STT_FUNC)
			continue;

		init_insn_state(&state, sec);
		set_func_state(&state.cfi);

		warnings += validate_symbol(file, sec, func, &state);
	}

	return warnings;
}

static int validate_vmlinux_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	sec = find_section_by_name(file->elf, ".noinstr.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	sec = find_section_by_name(file->elf, ".entry.text");
	if (sec) {
		warnings += validate_section(file, sec);
		warnings += validate_unwind_hints(file, sec);
	}

	return warnings;
}

static int validate_functions(struct objtool_file *file)
{
	struct section *sec;
	int warnings = 0;

	for_each_sec(file, sec) {
		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		warnings += validate_section(file, sec);
	}

	return warnings;
}

static int validate_reachable_instructions(struct objtool_file *file)
{
	struct instruction *insn;

	if (file->ignore_unreachables)
		return 0;

	for_each_insn(file, insn) {
		if (insn->visited || ignore_unreachable_insn(file, insn))
			continue;

		WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
		return 1;
	}

	return 0;
}

int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);

	if (!cfi_hash_alloc())
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	if (list_empty(&file->insn_list))
		goto out;

	if (vmlinux && !validate_dup) {
		ret = validate_vmlinux_functions(file);
		if (ret < 0)
			goto out;

		warnings += ret;
		goto out;
	}

	if (retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	ret = validate_functions(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	ret = validate_unwind_hints(file, NULL);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unret(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (!warnings) {
		ret = validate_reachable_instructions(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	ret = create_static_call_sections(file);
	if (ret < 0)
		goto out;
	warnings += ret;

	if (retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 * For now, don't fail the kernel build on fatal warnings.  These
	 * errors are still fairly common due to the growing matrix of
	 * supported toolchains and their recent pace of change.
	 */
	return 0;
}