// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 1995-2005 Russell King
// Copyright (C) 2012 ARM Ltd.
// Copyright (C) 2013-2017 Andes Technology Corporation

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/page.h>

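/*
 * Early MM globals: mmu_gathers backs the generic TLB-gather machinery,
 * anon_alias_lock is taken by the port's cache-alias handling code, and
 * swapper_pg_dir is the kernel's initial page directory, defined outside
 * this file.
 */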
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_SPINLOCK(anon_alias_lock);
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

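/*
 * All lowmem goes into ZONE_NORMAL; with CONFIG_HIGHMEM, pages between
 * max_low_pfn and max_pfn end up in ZONE_HIGHMEM.
 */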
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
#endif
	free_area_init(max_zone_pfn);
}

/*
 * Map all physical memory under high_memory into the kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
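/*
 * With only two levels, the p4d/pud/pmd offset helpers all fold back onto
 * the pgd entry itself; the sanity check inside the loop relies on that.
 */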
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;

	p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
	e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));

	v = (u32) __va(p);
	pge = pgd_offset_k(v);

	while (p < e) {
		int j;

		p4e = p4d_offset(pge, v);
		pue = pud_offset(p4e, v);
		pme = pmd_offset(pue, v);

		if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
			panic("%s: Kernel hardcoded for two-level page tables",
			      __func__);
		}

		/* Allocate one page to hold the PTEs... */
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));

		/* ...and fill the newly allocated page with PTEs. */
		for (j = 0; p < e && j < PTRS_PER_PTE;
		     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
			/* Create the mapping between p and v. */
			/* TODO: finer-grained page access permissions */
			set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
		}

		pge++;
	}
}
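
/*
 * Despite its pmd_t type, fixmap_pmd_p points at the page of PTEs that
 * backs the entire fixmap range; __set_fixmap() below indexes into it.
 */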
static pmd_t *fixmap_pmd_p;
static void __init fixedrange_init(void)
{
	unsigned long vaddr;
	pmd_t *pmd;
#ifdef CONFIG_HIGHMEM
	pte_t *pte;
#endif /* CONFIG_HIGHMEM */

	/*
	 * Fixed mappings:
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
	pmd = pmd_off_k(vaddr);
	fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!fixmap_pmd_p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));

#ifdef CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;

	pmd = pmd_off_k(vaddr);
	pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
	pkmap_page_table = pte;
#endif /* CONFIG_HIGHMEM */
}

/*
 * paging_init() sets up the kernel page tables, initialises the zone
 * memory maps, and allocates the empty zero page.
 */
void __init paging_init(void)
{
	int i;
	void *zero_page;

	pr_info("Setting up paging and PTEs.\n");

	/* Clear out the init_mm.pgd that will contain the kernel's mappings. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(1);

	map_ram();

	fixedrange_init();

	/* Allocate space for empty_zero_page. */
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	zone_sizes_init();

	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

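/*
 * Hand every highmem page that memblock has not reserved over to the buddy
 * allocator; lowmem is released separately via memblock_free_all().
 */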
static inline void __init free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long pfn;

	for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
		phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;

		if (!memblock_is_reserved(paddr))
			free_highmem_page(pfn_to_page(pfn));
	}
#endif
}

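/* With flat memory, mem_map covers every page frame up to max_pfn. */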
static void __init set_max_mapnr_init(void)
{
	max_mapnr = max_pfn;
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	phys_addr_t memory_start = memblock_start_of_DRAM();

	BUG_ON(!mem_map);
	set_max_mapnr_init();

	free_highmem();

	/* This will put all low memory onto the freelists. */
	memblock_free_all();
	mem_init_print_info(NULL);

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    consist : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
#endif
		CONSISTENT_BASE, CONSISTENT_END,
		(CONSISTENT_END - CONSISTENT_BASE) >> 20,
		VMALLOC_START, (unsigned long)VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
		(unsigned long)__va(memory_start), (unsigned long)high_memory,
		((unsigned long)high_memory -
		 (unsigned long)__va(memory_start)) >> 20,
		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,
		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUILD_BUG_ON(CONSISTENT_END > PKMAP_BASE);
#endif
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
	BUG_ON(CONSISTENT_END > PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_START >= VMALLOC_END);
	BUG_ON((unsigned long)high_memory > VMALLOC_START);
}

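/*
 * Install (non-zero pgprot) or tear down (zero pgprot) a single fixmap
 * entry. Callers normally go through the set_fixmap()/clear_fixmap()
 * helpers rather than calling this directly.
 */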
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}