// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

extern pgd_t early_pg_dir[PTRS_PER_PGD];
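
/*
 * Early shadow setup: map the whole KASAN shadow region to the single
 * kasan_early_shadow_page through shared page tables, in both early_pg_dir
 * and swapper_pg_dir, so shadow accesses work before kasan_init() builds
 * the real shadow.
 */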
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

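	/* Point every PTE of the early shadow PTE table at the zero shadow page. */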
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));

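	/* Point every early shadow PMD entry at that shared PTE table. */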
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

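	/* Cover the whole shadow region in early_pg_dir with the shared PMD. */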
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

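/*
 * Populate the PTE level for [vaddr, end): reuse the PTE table the PMD
 * already points at, or allocate a fresh one from memblock, then back
 * every still-empty shadow PTE with a newly allocated page.
 */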
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

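/*
 * Populate the PMD level for [vaddr, end): PMD-aligned chunks of at least
 * PMD_SIZE are mapped directly as hugepages when memblock can supply the
 * physical memory; anything else falls through to kasan_populate_pte().
 */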
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

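/*
 * Populate the PGD level for [vaddr, end): entries still pointing at the
 * early shadow are replaced by PGDIR-sized huge leaf mappings where
 * alignment and memblock allow it; otherwise the range descends into
 * kasan_populate_pmd().
 */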
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init() initialized the
		 * whole KASAN shadow region with kasan_early_shadow_pmd: if
		 * this is still the case, that means we can try to allocate a
		 * hugepage as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

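/*
 * Build the real shadow for [start, end) and initialize it to
 * KASAN_SHADOW_INIT so the covered region starts out unpoisoned.
 */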
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

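/*
 * Shallow population only allocates the top-level page tables: PGD entries
 * still pointing at the early shadow PMD get a fresh empty table, and the
 * lower levels are filled in later, on demand, by the generic vmalloc
 * KASAN code.
 */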
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != end);
}

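/* Page-align [start, end) and shallow-populate its shadow. */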
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

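/*
 * Set up the definitive shadow: shallow shadow for the vmalloc area when
 * CONFIG_KASAN_VMALLOC is enabled, real shadow for the linear mapping and
 * for the kernel/BPF/modules region, then switch the early shadow page to
 * read-only and enable reporting.
 */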
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

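	/*
	 * Remap the early shadow page read-only: it still backs every
	 * shadow region that was not populated above.
	 */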
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
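
	/* All shadow memory is now in place: enable error reporting. */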
	init_task.kasan_depth = 0;
}