// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

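/*
 * Statically allocated page tables backing the fixmap region. The PTE table
 * (bm_pte) is always used, while bm_pmd/bm_pud are only needed when the
 * configuration has enough page-table levels for the fixmap to require them.
 */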
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

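/*
 * Update an entry in swapper_pg_dir. The write goes through a fixmap slot so
 * that it works even if the pgd page itself is not writable through its
 * normal mapping; the spinlock serialises concurrent updaters.
 */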
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}
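
/*
 * For example, remapping a live PAGE_KERNEL region as PAGE_KERNEL_RO only
 * toggles PTE_WRITE/PTE_RDONLY and is therefore accepted by
 * pgattr_change_is_safe(), whereas setting or clearing PTE_CONT on a live
 * mapping is always rejected.
 */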

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

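/*
 * Populate a PTE table under *pmdp (allocating one if the entry is empty)
 * and map [addr, end) to phys in CONT_PTE_SIZE chunks, setting the
 * contiguous hint whenever the range is suitably aligned and contiguous
 * mappings have not been disallowed.
 */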
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

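/*
 * Create or update the mapping for [virt, virt + size) -> phys in the given
 * page-table directory, using prot for the leaf entries. pgtable_alloc
 * supplies new table pages when needed (it may be NULL if no allocation is
 * permitted), and flags can forbid block and/or contiguous mappings.
 */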
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

static int __init enable_crash_mem_map(char *arg)
{
	/*
	 * Proper parameter parsing is done by reserve_crashkernel(). We only
	 * need to know if the linear map has to avoid block mappings so that
	 * the crashkernel reservations can be unmapped later.
	 */
	crash_mem_map = true;

	return 0;
}
early_param("crashkernel", enable_crash_mem_map);

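/*
 * Create the linear mapping for all memory known to memblock, taking care
 * that the linear alias of the kernel's .text/.rodata is never mapped
 * writable.
 */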
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	int flags = 0;
	u64 i;

	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	int i;

	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings for before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
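/*
 * Tearing down a hotplugged (or vmemmap) range happens in two passes:
 * unmap_hotplug_range() clears the leaf entries (optionally freeing the
 * pages they mapped), and free_empty_tables() then frees any page-table
 * pages that have become completely empty, bounded by floor/ceiling so
 * that tables shared with neighbouring regions are left alone.
 */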
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, altmap);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
		if (!p4dp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(p4dp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

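/*
 * The fixmap_pud()/fixmap_pmd()/fixmap_pte() helpers return pointers to the
 * live entries covering a fixmap address; the PTE level always comes from
 * the statically allocated bm_pte table installed by early_fixmap_init().
 */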
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

	return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp;
	p4d_t *p4dp, p4d;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	p4dp = p4d_offset(pgdp, addr);
	p4d = READ_ONCE(*p4dp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(p4dp, addr);
	} else {
		if (p4d_none(p4d))
			__p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

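/*
 * pmd_free_pte_page() and pud_free_pmd_page() tear down a lower-level table
 * so that the entry can be replaced by a block mapping (called from the
 * generic huge-mapping ioremap path); the table is expected to map no live
 * data by the time these are invoked.
 */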
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

static bool inside_linear_region(u64 start, u64 size)
{
	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
	u64 end_linear_pa = __pa(PAGE_END - 1);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		/*
		 * Check for a wrap, it is possible because of randomized linear
		 * mapping the start physical address is actually bigger than
		 * the end physical address. In this case set start to zero
		 * because [0, end_linear_pa] range must still be able to cover
		 * all addressable physical addresses.
		 */
		if (start_linear_pa > end_linear_pa)
			start_linear_pa = 0;
	}

	WARN_ON(start_linear_pa > end_linear_pa);

	/*
	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
	 * accommodating both its ends but excluding PAGE_END. Max physical
	 * range which can be mapped inside this linear mapping range, must
	 * also be derived from its end points.
	 */
	return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	if (!inside_linear_region(start, size)) {
		pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
		return -EINVAL;
	}

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	else {
		max_pfn = PFN_UP(start + size);
		max_low_pfn = max_pfn;
	}

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed as it blocks pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining as well as
 * removal process for boot memory which is initially always online.
 * In future if and when boot memory could be removed, this notifier
 * should be dropped and free_hotplug_page_range() should handle any
 * reserved pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);
		if (early_section(ms))
			return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
	return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
#endif