// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* Map the whole area one PAGE_SIZE PTE at a time. Note that we
	 * don't increment phys because all PTEs are of the large size
	 * and thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	return 0;
}
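
/*
 * Worked example (illustrative, assuming mmu_vmemmap_psize == MMU_PAGE_16M
 * and PAGE_SIZE == 4K): the loop above installs 16M / 4K = 4096 identical
 * PTEs, each pointing at the same 16M-aligned phys (low 24 bits clear) and
 * each carrying the 4-bit 16M size encoding in PTE bits 8-11, so a fault
 * on any 4K slot of the area can load the full 16M translation.
 */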

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
			    unsigned long page_size)
{
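	/*
	 * Intentionally empty: this variant does not tear down vmemmap
	 * mappings on memory hot-remove; the mapping is left in place.
	 */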
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr;

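	/*
	 * Ask memblock for a naturally aligned block (align == size),
	 * anywhere from the bottom of memory up to __pa(MAX_DMA_ADDRESS),
	 * with no NUMA node preference.
	 */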
	ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
				     __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n",
		      __func__, size, size, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/*
 * map_kernel_page is used by __ioremap and by vmemmap_create_mapping
 * above. It adds an entry to the kernel page tables for the given
 * effective address; on Book3E there is no hash page table to update.
 */
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
	if (slab_is_available()) {
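		/*
		 * Slab is up: allocate any missing intermediate levels
		 * with the normal pgtable allocators, which can sleep.
		 */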
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		pudp = pud_alloc(&init_mm, p4dp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
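		/*
		 * Early boot: the slab allocator is not available yet, so
		 * carve each missing level straight out of memblock and
		 * link it in by hand.
		 */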
		pgdp = pgd_offset_k(ea);
		p4dp = p4d_offset(pgdp, ea);
		if (p4d_none(*p4dp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			p4d_populate(&init_mm, p4dp, pudp);
		}
		pudp = pud_offset(p4dp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));

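	/* Order the PTE update before any subsequent stores that publish the mapping. */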
	smp_wmb();
	return 0;
}
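
/*
 * Usage sketch (illustrative only, compiled out): an __ioremap-style
 * caller maps one device page at a time with a non-cached protection.
 * example_ioremap_one_page and its ea/pa arguments are hypothetical;
 * the caller is expected to pass page-aligned addresses.
 */
#if 0
static void __iomem *example_ioremap_one_page(unsigned long ea,
					      unsigned long pa)
{
	if (map_kernel_page(ea, pa, pgprot_noncached(PAGE_KERNEL)))
		return NULL;

	return (void __iomem *)ea;
}
#endif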