/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

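/*
 * Number of PID bits supported by the MMU (read from the device tree or
 * defaulted in radix_init_pgtable()) and the first PID handed out for
 * process address spaces; see the PID carve-up in radix_init_pgtable().
 */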
unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

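/*
 * Native (bare-metal) hook for register_process_table: install the host
 * process table address and size into partition-table entry 0, keeping
 * the existing first doubleword and setting PATB_GR in the second.
 */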
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

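/*
 * Boot-time page table allocator: take memory straight from memblock,
 * honouring an optional physical region or NUMA node hint, and fall back
 * to any available memory. Returns a zeroed, size-aligned block.
 */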
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	unsigned long pa = 0;
	void *pt;

	if (region_start || region_end) /* has region hint */
		pa = memblock_alloc_range(size, size, region_start, region_end,
						MEMBLOCK_NONE);
	else if (nid != -1) /* has node hint */
		pa = memblock_alloc_base_nid(size, size,
						MEMBLOCK_ALLOC_ANYWHERE,
						nid, MEMBLOCK_NONE);

	if (!pa)
		pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);

	BUG_ON(!pa);

	pt = __va(pa);
	memset(pt, 0, size);

	return pt;
}

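/*
 * Map a single kernel page before the slab allocator is available, walking
 * (and, where needed, allocating) the kernel page tables with
 * early_alloc_pgtable(). map_page_size selects a PUD-, PMD- or PTE-level
 * mapping.
 */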
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * We should make the page table allocation functions able to take a
	 * node, so that we can place kernel page tables on the right nodes
	 * after boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

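/*
 * Map one kernel page with no placement hints; this is the interface the
 * rest of the kernel uses. For example (illustrative only), a caller that
 * wants a single base-size mapping would do something like:
 *
 *	radix__map_kernel_page(ea, pa, PAGE_KERNEL, PAGE_SIZE);
 *
 * and could pass PMD_SIZE or PUD_SIZE instead to request a larger mapping.
 */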
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
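/*
 * Walk the kernel mapping for [start, end) page by page and clear the
 * given PTE bits, handling huge PUD/PMD entries in place, then flush the
 * kernel TLB range. Used below to make rodata read-only and initmem
 * non-executable.
 */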
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
}

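/*
 * Map the physical range [start, end) into the kernel linear mapping,
 * preferring 1G then 2M then base-size pages as alignment and the
 * remaining length allow. With STRICT_KERNEL_RWX, mappings that would
 * straddle the end of kernel text (__init_begin) are broken up so text
 * can keep different permissions. Kernel and interrupt-vector text is
 * mapped executable.
 */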
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif
	int psize;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
			(addr < __pa_symbol(__init_begin)) &&
			(addr + mapping_size) > __pa_symbol(__init_begin)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr < __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) > __pa_symbol(__init_begin)) {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

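/*
 * Boot-time radix setup for the host: create the linear mapping for all
 * memblock memory, choose the PID layout, allocate and register the
 * process table, flush stale translations, and claim a guard PID for
 * init_mm.
 */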
void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address directly.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

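/*
 * Set up partition-table entry 0 for bare-metal boot: point it at the
 * kernel radix tree (PATB_HR) and leave the process-table doubleword to
 * be filled in later via register_process_table().
 */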
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

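/*
 * Flat device-tree scan callback: on the first "cpu" node, read the
 * optional "ibm,mmu-pid-bits" property and decode the
 * "ibm,processor-radix-AP-encodings" cells (page-size shift in the low
 * bits, AP encoding in the top three) into mmu_psize_defs.
 */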
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap  = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;
	/*
	 * Otherwise, assume we have 4k and 64k page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so
	 * that the hypervisor and guest can set up IAMR (Instruction
	 * Authority Mask Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}

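/*
 * Boot-CPU MMU bring-up for radix: pick the linear and vmemmap page sizes,
 * fill in the generic page-table geometry and address-layout variables
 * with the radix values, set up the partition/process tables (natively or
 * via the hypervisor), and switch to the guard PID.
 */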
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

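/*
 * Secondary-CPU bring-up: mirror the per-CPU register setup done on the
 * boot CPU (LPCR, PTCR, AMOR, IAMR) and switch to the guard PID.
 */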
void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
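/*
 * Free a PTE (or PMD) page and clear the entry above it, but only once
 * every slot in the page is empty; used while tearing down mappings on
 * memory unplug.
 */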
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

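/*
 * Arguments passed to stop_machine_change_mapping(): the huge PTE to
 * clear, the sub-range being removed, and the naturally aligned extent of
 * the original mapping that must be re-created around it.
 */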
struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

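/*
 * stop_machine() callback used by split_kernel_mapping(): with all other
 * CPUs held off, clear the huge PTE and re-map the parts of the aligned
 * region that are not being removed.
 */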
static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
			(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1);
	create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}

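/*
 * Clear all present PTEs in [addr, end); callers are expected to pass
 * page-aligned ranges, anything else is warned about and skipped.
 */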
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * Helper to clear the pte and potentially split the mapping.
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
				unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE without having flushed the
		 * mapping yet, so we must remap and flush. If the effects
		 * are visible outside the processor, or if we are running
		 * in code close to the mapping we cleared, we are in
		 * trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
			overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

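/*
 * Tear down the kernel mapping for [start, end): walk down from the PGD,
 * splitting or clearing huge entries, freeing now-empty page-table pages,
 * and finally flushing the kernel TLB range.
 */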
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

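/*
 * Map one vmemmap chunk, placing the backing page tables on the node that
 * owns the physical memory being described.
 */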
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

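/*
 * Atomically clear/set bits in a huge-page PMD (via radix__pte_update on
 * the PMD viewed as a PTE) and return the old value; the caller must hold
 * the PMD lock.
 */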
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited page table,
 * we consider the allocated page table as a list head. On withdraw we
 * need to make sure we zero out the used list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
			       unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

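/*
 * Set additional access bits (dirty/accessed/write/exec) in a PTE. When a
 * nest MMU (copro) may be using this address space, relaxing write access
 * goes through an invalidate, flush and rewrite sequence to avoid NMMU
 * hangs; otherwise the bits are simply OR-ed in.
 */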
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid an NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}