// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

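/*
 * Derive the PFN limits from memblock: the lowest DRAM PFN, the highest
 * PFN covered by lowmem (the current memblock limit), and the highest
 * PFN of all DRAM, which may lie in highmem.
 */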
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

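/*
 * Set the DMA zone size and limit from the machine descriptor.  If the
 * platform does not specify a DMA zone size, DMA allocations may come
 * from anywhere in the 32-bit physical address space.
 */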
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

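/*
 * Report the per-zone maximum PFNs (DMA, normal and, if configured,
 * highmem) to the core mm via free_area_init().
 */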
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry
	 * for it, because we round the freed memory map to pageblock
	 * boundaries.
	 */
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

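/*
 * "Steal" a block of memory for exclusive platform use: allocate it
 * from memblock and then remove it entirely, so the kernel never maps
 * or otherwise uses it.  Only permitted until arm_memblock_init() has
 * completed.
 */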
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

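/*
 * Reserve the physical memory occupied by the initrd, if any, and
 * record its virtual start and end for later use.  The initrd is
 * disabled if its region is not memory or is already reserved.
 */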
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
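/*
 * Read this CPU's cache type register and track the smallest I-cache
 * line size seen so far; warn when a secondary CPU reports a different
 * line size than the boot CPU.
 */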
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

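/*
 * Establish the PFN limits, run the early memory test over lowmem and
 * bring up the sparse memory model and zone sizes, now that all fixed
 * memblock reservations have been made.
 */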
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in the VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock.
		 */
		start = round_down(start, pageblock_nr_pages);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in the VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock.
		 */
		prev_end = ALIGN(end, pageblock_nr_pages);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = ALIGN(end, pageblock_nr_pages);
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem pages free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > arm_dma_pfn_limit)
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

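/*
 * Apply (set) or revert (clear) the permissions described by @perms to
 * every section-sized mapping each entry covers, in the given mm.
 * Entries that are not section-aligned are skipped with an error.
 */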
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/**
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all
 * other CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

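/*
 * Mark everything covered by nx_perms as non-executable, under
 * stop_machine() so the section tables can be rewritten without
 * racing with other CPUs.
 */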
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

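/*
 * Make the kernel text and rodata read-only once boot-time
 * initialization is done, then run the W+X check.
 */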
void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

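/*
 * Temporarily drop, and later restore, the read-only protection on
 * kernel text and rodata in the current mm.  Both helpers are no-ops
 * until mark_rodata_ro() has run.
 */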
void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

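/*
 * Free the memory used only during boot: apply the final kernel
 * permissions first, then poison and release the init sections
 * (kept on Integrator and CIntegrator machines).
 */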
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif