#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

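/*
 * Map the shadow for one range of mapped physical page frames by
 * backing it with real pages via vmemmap_populate().
 */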
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in advance
	 * to slightly speed up fastpath. In some rare cases we could cross
	 * boundary of mapped shadow, so we just map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

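/* Clear the kernel PGD entries covering [start, end), one PGDIR_SIZE step at a time. */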
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

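/*
 * Point every PGD entry covering the shadow region at the shared zero
 * PUD, so the whole shadow initially reads as unpoisoned.
 */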
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}

#ifdef CONFIG_KASAN_INLINE
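/*
 * With inline instrumentation a bad access shows up as a general
 * protection fault; hint at the likely cause when that happens.
 */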
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

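/*
 * Build the single zero shadow hierarchy (pte/pmd/pud all backed by
 * kasan_zero_page) and hook it into both the early and the init top-level
 * page tables, so shadow accesses work before kasan_init() runs.
 */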
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}

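/*
 * Replace the early zero shadow with the real shadow layout: zero shadow
 * for regions that are never poisoned, actually populated shadow for the
 * direct mapping of physical memory and for the kernel image.
 */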
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

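	/*
	 * Keep running on a throwaway copy of the page tables while the
	 * shadow entries in init_level4_pgt are torn down and rebuilt.
	 */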
	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

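	/* Drop the early, zero-page backed shadow mapping from init_level4_pgt. */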
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

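	/* Zero shadow for the whole address range below the direct mapping. */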
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

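	/* Populate real shadow for the direct mapping of every mapped physical memory range. */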
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
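	/*
	 * Zero shadow for everything between the end of the direct mapping
	 * and the start of the kernel image mapping.
	 */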
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

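	/* Populate real shadow for the kernel image (text and data). */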
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

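	/* Zero shadow for the rest of the address space, above the modules area. */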
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

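	/* The shadow is now fully set up in init_level4_pgt; switch back to it. */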
	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}