/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>

#include <trace/events/thp.h>

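/*
 * Bare-metal implementation of register_process_table(): record the process
 * table base, its encoded size and PATB_GR in the second doubleword (PATB1)
 * of the partition-table entry.
 */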
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb1 = base | table_size | PATB_GR;

	partition_tb->patb1 = cpu_to_be64(patb1);
	return 0;
}

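/*
 * Early (pre-slab) allocator for page-table fragments: grab a naturally
 * aligned block from memblock and zero it.
 */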
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

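/*
 * Establish a single kernel mapping of size map_page_size (1G, 2M or base
 * page) from effective address ea to physical address pa. Page tables are
 * taken from the normal allocators once slab is up, and from memblock
 * before that.
 */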
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max address range
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

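/*
 * Create the kernel linear mapping and the process table. Each memblock
 * region is mapped with the largest page size available (1G, then 2M),
 * falling back to smaller page sizes for whatever is left over.
 */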
static void __init radix_init_pgtable(void)
{
	int loop_count;
	u64 base, end, start_addr;
	unsigned long rts_field;
	struct memblock_region *reg;
	unsigned long linear_page_size;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	loop_count = 0;
	for_each_memblock(memory, reg) {

		start_addr = reg->base;

redo:
		if (loop_count < 1 && mmu_psize_defs[MMU_PAGE_1G].shift)
			linear_page_size = PUD_SIZE;
		else if (loop_count < 2 && mmu_psize_defs[MMU_PAGE_2M].shift)
			linear_page_size = PMD_SIZE;
		else
			linear_page_size = PAGE_SIZE;

		base = _ALIGN_UP(start_addr, linear_page_size);
		end = _ALIGN_DOWN(reg->base + reg->size, linear_page_size);

		pr_info("Mapping range 0x%lx - 0x%lx with 0x%lx\n",
			(unsigned long)base, (unsigned long)end,
			linear_page_size);

		while (base < end) {
			radix__map_kernel_page((unsigned long)__va(base),
					      base, PAGE_KERNEL_X,
					      linear_page_size);
			base += linear_page_size;
		}
		/*
		 * map the rest using lower page size
		 */
		if (end < reg->base + reg->size) {
			start_addr = end;
			loop_count++;
			goto redo;
		}
	}
	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUILD_BUG_ON_MSG((PRTB_SIZE_SHIFT > 36), "Process table size too large.");
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}

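/*
 * Allocate the partition table, point PATB0 at the kernel radix page table
 * (with PATB_HR set) and publish the table through the PTCR SPR.
 */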
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field;

	rts_field = radix__get_tree_size();

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
	partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
					  RADIX_PGD_INDEX_SIZE | PATB_HR);
	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	/*
	 * update partition table control register,
	 * 64 K size.
	 */
	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

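/*
 * Map a radix page-size shift (12, 16, 21 or 30) to an mmu_psize_defs
 * index; return -1 for shifts we don't support.
 */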
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

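/*
 * Flat device-tree scan callback: parse the "ibm,processor-radix-AP-encodings"
 * property of the cpu node. Each 32-bit cell carries the AP encoding in its
 * top three bits and the page-size shift in the remaining bits.
 */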
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap  = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;
	/*
	 * No encodings in the device-tree; assume 4K and 64K page support.
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

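/*
 * Flush all translations and set HID0_POWER9_RADIX so the core switches to
 * radix mode; the callers only invoke this on CPU_FTR_POWER9_DD1.
 */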
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	/*
	 * now switch the HID
	 */
	hid0  = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

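/*
 * Boot-CPU MMU setup for radix: pick the base and vmemmap page sizes, fill
 * in the generic page-table geometry variables with the radix values, and
 * on bare metal switch the core to radix and install the partition table
 * before building the kernel page tables.
 */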
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
	}

	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
	}
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On a radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
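/*
 * Back one chunk of the virtual memmap with memory at the given page size;
 * a failure to map here is fatal (BUG_ON).
 */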
int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	/* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

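/*
 * Clear and/or set bits in a huge-page PMD and return the old value; the
 * update itself goes through the radix PTE helper, since PMD and PTE
 * entries share a format on radix.
 */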
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);
	/*FIXME!!  Verify whether we need this kick below */
	kick_all_cpus_sync();
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited page table,
 * we consider the allocated page table as a list head. On withdraw we need
 * to make sure we zero out the used list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
			       unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_linux_pte_or_hugepte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since the format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_linux_pte_or_hugepte to finish.
	 */
	kick_all_cpus_sync();
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */