// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

static struct batrange {	/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

int __init find_free_bat(void)
{
	int b;
	int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

	for (b = 0; b < n; b++) {
		struct ppc_bat *bat = BATS[b];

		if (!(bat[1].batu & 3))
			return b;
	}
	return -1;
}

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
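
/*
 * Worked example (illustrative only): for base = 0x01000000 and
 * top = 0x0a000000, the lowest bit set in base allows a 16M block
 * (ffs(base) - 1 = 24) and the 0x09000000-byte range fits below a
 * 128M block (fls(top - base) - 1 = 27), so the function returns
 * min(256M, 16M, 128M) = 16M.
 */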

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;	/* Vp = 1 */
}
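
/*
 * For reference (illustrative): the BL field encodes the block size in
 * 128k units minus one, so a 128k block gives bl = 0 and a 256M block
 * gives bl = (SZ_256M >> 17) - 1 = 0x7ff.
 */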

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = bat_block_size(base, top);

		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}
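
/*
 * Illustrative walk-through: mapping 192MB of RAM starting at 0 takes
 * two iterations, a 128MB BAT at 0 followed by a 64MB BAT at
 * 0x08000000, each size coming from bat_block_size() on the remaining
 * range.
 */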

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET;
	unsigned long size;

	size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
	setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);

	if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
		pr_debug_once("Read-Write memory mapped without BATs\n");
		if (base >= border)
			return base;
		if (top >= border)
			top = border;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}

static bool is_module_segment(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
	return true;
}
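
/*
 * For instance (hypothetical layout): with MODULES_VADDR = 0xb0000100
 * and MODULES_END = 0xc0000000, any address in [0xb0000000, 0xbfffffff]
 * is treated as a module segment, because the check rounds both bounds
 * out to 256MB segment boundaries.
 */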

void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
	unsigned long size;

	for (i = 0; i < nb - 1 && base < top;) {
		size = bat_block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = bat_block_size(base, top);
		if ((top - base) > size) {
			size <<= 1;
			if (strict_kernel_rwx_enabled() && base + size > border)
				pr_warn("Some RW data is getting mapped X. Adjust CONFIG_DATA_SHIFT to avoid that.\n");
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (is_module_segment(i << 28))
			continue;

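		/* 0x10000000 is the no-execute (N) bit of the segment register */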
		mtsr(mfsr(i << 28) | 0x10000000, i << 28);
	}
}

void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	/* Do DBAT first */
	wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
			   | _PAGE_COHERENT | _PAGE_GUARDED);
	wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
	bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[1].batu |= 1;	/* Vp = 1 */
	if (flags & _PAGE_GUARDED) {
		/* G bit must be zero in IBATs */
		flags &= ~_PAGE_EXEC;
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
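
/*
 * Example (illustrative): setbat(2, 0xf0000000, 0xf0000000, SZ_256M, prot)
 * yields bl = 0x7ff, so bat_addrs[2] records the range
 * 0xf0000000..0xffffffff backed by physical 0xf0000000.
 */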

/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	pmd = pmd_off(mm, ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/*
	 * We also avoid filling the hash if not coming from a fault:
	 * 0x300 is a data storage interrupt (DSI) and 0x400 an
	 * instruction storage interrupt (ISI).
	 */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
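
	/*
	 * Worked example (illustrative): with 256MB of RAM and 4kB pages,
	 * n_hpteg = SZ_256M / (4096 * 8) = 8192, already a power of two,
	 * so Hash_size = 8192 << 6 = 512kB.
	 */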

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;
}

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte.
	 * The 0xffff masks replace the 16-bit immediate field of the
	 * patched instruction; the 0x7c0 masks replace a 5-bit field
	 * starting at bit 6, which is why the hash_mb values are
	 * shifted left by 6.
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size         = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask         = 0x%lx\n", Hash_mask);
}

void __init early_init_mmu(void)
{
}