// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

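/*
 * Protection to use when the early shadow page is remapped read-only:
 * the hash MMU (MMU_FTR_HPTE_TABLE) gets PAGE_READONLY, everything
 * else gets PAGE_KERNEL_RO.
 */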
static pgprot_t __init kasan_prot_ro(void)
{
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return PAGE_READONLY;

	return PAGE_KERNEL_RO;
}

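/*
 * Make every PTE of the given page table point at the zero-filled
 * kasan_early_shadow_page with the requested protection.
 */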
static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

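/*
 * Allocate a dedicated PTE table for each PMD entry of the shadow
 * range [k_start, k_end) that still points at the shared early shadow
 * PTE table. The new tables initially map the early shadow page
 * read-write.
 */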
int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
	pmd_t *pmd;
	unsigned long k_cur, k_next;

	pmd = pmd_off_k(k_start);

	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
		pte_t *new;

		k_next = pgd_addr_end(k_cur, k_end);
		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
			continue;

		new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

		if (!new)
			return -ENOMEM;
		kasan_populate_pte(new, PAGE_KERNEL);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
	return 0;
}

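/*
 * Default (__weak) region initialisation: back the shadow of
 * [start, start + size) with a freshly allocated block and map it page
 * by page. Platform code may override this with an implementation
 * using larger pages.
 */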
int __init __weak kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	int ret;
	void *block;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
	if (!block)
		return -ENOMEM;

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		void *va = block + k_cur - k_start;
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}

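/*
 * Replace every shadow PTE in [k_start, k_end) that still maps the
 * early shadow page with the given PTE value, then flush the TLB over
 * the range.
 */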
void __init
kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
{
	unsigned long k_cur;

	for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_off_k(k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

		if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page)))
			continue;

		__set_pte_at(&init_mm, k_cur, ptep, pte, 0);
	}

	flush_tlb_kernel_range(k_start, k_end);
}

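/*
 * Remap whatever still maps the early shadow page read-only, both in
 * the early shadow PTE table itself and across the whole shadow
 * region.
 */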
static void __init kasan_remap_early_shadow_ro(void)
{
	pgprot_t prot = kasan_prot_ro();
	phys_addr_t pa = __pa(kasan_early_shadow_page);

	kasan_populate_pte(kasan_early_shadow_pte, prot);

	kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END,
				  pfn_pte(PHYS_PFN(pa), prot));
}

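/*
 * With CONFIG_KASAN_VMALLOC the shadow of vmalloc (and module) space
 * is populated on demand, so the early shadow mappings covering those
 * areas can be dropped.
 */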
static void __init kasan_unmap_early_shadow_vmalloc(void)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);

	kasan_update_early_region(k_start, k_end, __pte(0));

#ifdef MODULES_VADDR
	k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR);
	k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END);
	kasan_update_early_region(k_start, k_end, __pte(0));
#endif
}

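/*
 * On hash MMU, pre-allocate page tables for the whole shadow region;
 * a failure here is fatal.
 */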
void __init kasan_mmu_init(void)
{
	int ret;

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}
}

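/*
 * Main initialisation: map real shadow memory for every lowmem region,
 * make sure the vmalloc shadow has page tables when
 * CONFIG_KASAN_VMALLOC is enabled, remap the early shadow read-only
 * and finally enable KASAN reporting.
 */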
void __init kasan_init(void)
{
	phys_addr_t base, end;
	u64 i;
	int ret;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;

		ret = kasan_init_region(__va(base), top - base);
		if (ret)
			panic("kasan: kasan_init_region() failed");
	}

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}

	kasan_remap_early_shadow_ro();

	clear_page(kasan_early_shadow_page);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}

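/*
 * Late setup: with CONFIG_KASAN_VMALLOC, remove the early shadow
 * mappings covering vmalloc/module space.
 */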
void __init kasan_late_init(void)
{
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_unmap_early_shadow_vmalloc();
}

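/*
 * Earliest setup: point every PMD of the shadow region at the shared
 * early shadow PTE table, which itself maps the single early shadow
 * page, so instrumented code can run before real shadow memory exists.
 */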
void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pmd_t *pmd = pmd_off_k(addr);

	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

	do {
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);
}