// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

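/*
 * The shared zero page: a page-aligned page of zeros handed out
 * (read-only) for untouched anonymous mappings and zero-filled reads.
 */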
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

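/*
 * Page tables are allocated in three phases, each with its own set of
 * pt_alloc_ops: "early" while the MMU is still off (tables come from
 * statically allocated arrays), "fixmap" once the MMU is on but only
 * memblock is available (freshly allocated tables are reached through
 * the fixmap), and "late" once the page allocator is up
 * (__get_free_page()).
 */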
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

void __init setup_bootmem(void)
{
	phys_addr_t mem_start = 0;
	phys_addr_t start, dram_end, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);
	u64 i;

	/* Find the memory region containing the kernel */
	for_each_mem_range(i, &start, &end) {
		phys_addr_t size = end - start;
		if (!mem_start)
			mem_start = start;
		if (start <= vmlinux_start && vmlinux_end <= end)
			BUG_ON(size == 0);
	}

	/*
	 * The maximal physical memory size is -PAGE_OFFSET.
	 * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is
	 * removed as it is unusable by the kernel.
	 */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	dram_end = memblock_end_of_DRAM();

	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	max_pfn = PFN_DOWN(dram_end);
	max_low_pfn = max_pfn;
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

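/*
 * swapper_pg_dir is the kernel's final page table; trampoline_pg_dir and
 * early_pg_dir are only used while the MMU is being brought up by head.S
 * and setup_vm().
 */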
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

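/*
 * Install, or tear down when prot is empty, the PTE backing a single
 * fixmap slot; callers reach this through set_fixmap()/clear_fixmap().
 */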
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

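/*
 * Statically allocated PMD tables used before the page allocator is up:
 * one for the trampoline mapping, one backing the fixmap, one for the
 * early kernel mapping and one for the early DTB mapping.
 */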
pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif
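/*
 * With the PMD folded (RV32/Sv32) the level below the PGD is the PTE, so
 * the pgd_next helpers above resolve to the pte variants; otherwise
 * (RV64/Sv39) they go through the PMD level.
 */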

void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}
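/*
 * For example, a memory bank whose base and size are both PMD-aligned
 * (2MB with Sv39) is mapped entirely with PMD leaf entries, while any
 * misalignment on either end drops the whole bank down to 4K pages.
 */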

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
435 
setup_vm(uintptr_t dtb_pa)436 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
437 {
438 	uintptr_t va, pa, end_va;
439 	uintptr_t load_pa = (uintptr_t)(&_start);
440 	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
441 	uintptr_t map_size;
442 #ifndef __PAGETABLE_PMD_FOLDED
443 	pmd_t fix_bmap_spmd, fix_bmap_epmd;
444 #endif
445 
446 	va_pa_offset = PAGE_OFFSET - load_pa;
447 	pfn_base = PFN_DOWN(load_pa);
448 
449 	/*
450 	 * Enforce boot alignment requirements of RV32 and
451 	 * RV64 by only allowing PMD or PGD mappings.
452 	 */
453 	map_size = PMD_SIZE;
454 
455 	/* Sanity check alignment and size */
456 	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
457 	BUG_ON((load_pa % map_size) != 0);
458 
459 	pt_ops.alloc_pte = alloc_pte_early;
460 	pt_ops.get_pte_virt = get_pte_virt_early;
461 #ifndef __PAGETABLE_PMD_FOLDED
462 	pt_ops.alloc_pmd = alloc_pmd_early;
463 	pt_ops.get_pmd_virt = get_pmd_virt_early;
464 #endif
465 	/* Setup early PGD for fixmap */
466 	create_pgd_mapping(early_pg_dir, FIXADDR_START,
467 			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
468 
469 #ifndef __PAGETABLE_PMD_FOLDED
470 	/* Setup fixmap PMD */
471 	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
472 			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
473 	/* Setup trampoline PGD and PMD */
474 	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
475 			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
476 	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
477 			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
478 #else
479 	/* Setup trampoline PGD */
480 	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
481 			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
482 #endif
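
	/*
	 * The trampoline table above covers just the first PMD/PGD worth
	 * of the kernel, so head.S can turn the MMU on before switching
	 * to early_pg_dir.
	 */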

	/*
	 * Setup early PGD covering the entire kernel, which allows us
	 * to reach paging_init(). We map all memory banks later in
	 * setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
	/*
	 * Create two consecutive PMD mappings for FDT early scan, since
	 * the FDT may cross a PMD boundary.
	 */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
	/*
	 * Create two consecutive PGD mappings for FDT early scan, since
	 * the FDT may cross a PGD boundary.
	 */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings, so the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but page table setup is not
	 * complete yet, so the fixmap page table alloc functions must be
	 * used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to the swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/*
	 * Generic page allocation functions must be used to set up page
	 * tables from now on.
	 */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first available built-in DTB */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
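/*
 * Enforce W^X on the kernel image: text stays executable but becomes
 * read-only, rodata becomes read-only and non-executable, and everything
 * from _data up to the end of lowmem is made non-executable.
 */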
void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

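/*
 * Register each memblock memory region with the resource tree, so it shows
 * up as "System RAM" (or "reserved" for nomap regions) in /proc/iomem.
 */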
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_mem_region(region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}

void __init paging_init(void)
{
	setup_vm_final();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif