// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

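/* Protection used when the zero early shadow page is mapped read-only. */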
static pgprot_t kasan_prot_ro(void)
{
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return PAGE_READONLY;

	return PAGE_KERNEL_RO;
}

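/* Point every PTE of the given page table at the zero early shadow page. */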
static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

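/*
 * Allocate and install page tables for the shadow range [k_start, k_end),
 * replacing the shared early shadow PTE table wherever it is still in use.
 */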
static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
	pmd_t *pmd;
	unsigned long k_cur, k_next;
	pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;

	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
		pte_t *new;

		k_next = pgd_addr_end(k_cur, k_end);
		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
			continue;

		if (slab_is_available())
			new = pte_alloc_one_kernel(&init_mm);
		else
			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

		if (!new)
			return -ENOMEM;
		kasan_populate_pte(new, prot);

		smp_wmb(); /* See comment in __pte_alloc */

		spin_lock(&init_mm.page_table_lock);
		/* Has another populated it ? */
		if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
			pmd_populate_kernel(&init_mm, pmd, new);
			new = NULL;
		}
		spin_unlock(&init_mm.page_table_lock);

		if (new && slab_is_available())
			pte_free_kernel(&init_mm, new);
	}
	return 0;
}

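/* Allocate one zeroed page, from the buddy allocator if available, else memblock. */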
static void __ref *kasan_get_one_page(void)
{
	if (slab_is_available())
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

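/*
 * Back the shadow of [start, start + size) with real, writable pages so
 * KASAN can record accesses to that region.
 */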
static int __ref kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	int ret;
	void *block = NULL;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	if (!slab_is_available())
		block = memblock_alloc(k_end - k_start, PAGE_SIZE);

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		if (!va)
			return -ENOMEM;

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}

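/* Remap every PTE that still points at the zero early shadow page read-only. */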
static void __init kasan_remap_early_shadow_ro(void)
{
	pgprot_t prot = kasan_prot_ro();
	unsigned long k_start = KASAN_SHADOW_START;
	unsigned long k_end = KASAN_SHADOW_END;
	unsigned long k_cur;
	phys_addr_t pa = __pa(kasan_early_shadow_page);

	kasan_populate_pte(kasan_early_shadow_pte, prot);

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		pte_t *ptep = pte_offset_kernel(pmd, k_cur);

		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
			continue;

		__set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
	}
	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

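/* Install real shadow memory for every memblock region within lowmem. */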
void __init kasan_mmu_init(void)
{
	int ret;
	struct memblock_region *reg;

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;

		ret = kasan_init_region(__va(base), top - base);
		if (ret)
			panic("kasan: kasan_init_region() failed");
	}
}

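/* Final KASAN setup: make the zero shadow page read-only and enable reporting. */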
void __init kasan_init(void)
{
	kasan_remap_early_shadow_ro();

	clear_page(kasan_early_shadow_page);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}

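/* Module text/data also needs shadow memory; set it up at module allocation time. */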
#ifdef CONFIG_MODULES
void *module_alloc(unsigned long size)
{
	void *base;

	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
				    NUMA_NO_NODE, __builtin_return_address(0));

	if (!base)
		return NULL;

	if (!kasan_init_region(base, size))
		return base;

	vfree(base);

	return NULL;
}
#endif

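/*
 * On book3s/32 hash, point the hash handlers at a temporary 256k hash table
 * so kernel hash faults can be serviced until the real hash table is set up.
 */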
#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

static void __init kasan_early_hash_table(void)
{
	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);

	Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

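/*
 * Map the whole shadow area to the zero early shadow page so KASAN checks
 * are harmless until real shadow memory is installed by kasan_mmu_init().
 */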
void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

	do {
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_early_hash_table();
}