1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Page table handling routines for radix page table.
4 *
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6 */
7
8 #define pr_fmt(fmt) "radix-mmu: " fmt
9
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
14 #include <linux/of_fdt.h>
15 #include <linux/mm.h>
16 #include <linux/hugetlb.h>
17 #include <linux/string_helpers.h>
18 #include <linux/memory.h>
19
20 #include <asm/pgalloc.h>
21 #include <asm/mmu_context.h>
22 #include <asm/dma.h>
23 #include <asm/machdep.h>
24 #include <asm/mmu.h>
25 #include <asm/firmware.h>
26 #include <asm/powernv.h>
27 #include <asm/sections.h>
28 #include <asm/smp.h>
29 #include <asm/trace.h>
30 #include <asm/uaccess.h>
31 #include <asm/ultravisor.h>
32
33 #include <trace/events/thp.h>
34
35 unsigned int mmu_pid_bits;
36 unsigned int mmu_base_pid;
37 unsigned long radix_mem_block_size __ro_after_init;
38
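/*
 * Early page table allocation helper: grabs naturally aligned memory from
 * memblock before the slab allocator is available, optionally constrained
 * to a node and physical region, and panics on failure.
 */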
39 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
40 unsigned long region_start, unsigned long region_end)
41 {
42 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
43 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
44 void *ptr;
45
46 if (region_start)
47 min_addr = region_start;
48 if (region_end)
49 max_addr = region_end;
50
51 ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
52
53 if (!ptr)
54 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
55 __func__, size, size, nid, &min_addr, &max_addr);
56
57 return ptr;
58 }
59
60 /*
61 * When allocating pud or pmd pointers, we allocate a complete page
62 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
63 * is to ensure that the page obtained from the memblock allocator
64 * can be completely used as page table page and can be freed
65 * correctly when the page table entries are removed.
66 */
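/*
 * Boot-time variant of __map_kernel_page(): walks (and, where needed,
 * allocates via early_alloc_pgtable()) the kernel page tables and installs
 * a leaf PTE at the PUD, PMD or PTE level depending on map_page_size.
 */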
67 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
68 pgprot_t flags,
69 unsigned int map_page_size,
70 int nid,
71 unsigned long region_start, unsigned long region_end)
72 {
73 unsigned long pfn = pa >> PAGE_SHIFT;
74 pgd_t *pgdp;
75 p4d_t *p4dp;
76 pud_t *pudp;
77 pmd_t *pmdp;
78 pte_t *ptep;
79
80 pgdp = pgd_offset_k(ea);
81 p4dp = p4d_offset(pgdp, ea);
82 if (p4d_none(*p4dp)) {
83 pudp = early_alloc_pgtable(PAGE_SIZE, nid,
84 region_start, region_end);
85 p4d_populate(&init_mm, p4dp, pudp);
86 }
87 pudp = pud_offset(p4dp, ea);
88 if (map_page_size == PUD_SIZE) {
89 ptep = (pte_t *)pudp;
90 goto set_the_pte;
91 }
92 if (pud_none(*pudp)) {
93 pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
94 region_end);
95 pud_populate(&init_mm, pudp, pmdp);
96 }
97 pmdp = pmd_offset(pudp, ea);
98 if (map_page_size == PMD_SIZE) {
99 ptep = pmdp_ptep(pmdp);
100 goto set_the_pte;
101 }
102 if (!pmd_present(*pmdp)) {
103 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
104 region_start, region_end);
105 pmd_populate_kernel(&init_mm, pmdp, ptep);
106 }
107 ptep = pte_offset_kernel(pmdp, ea);
108
109 set_the_pte:
110 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
111 asm volatile("ptesync": : :"memory");
112 return 0;
113 }
114
115 /*
116 * nid, region_start, and region_end are hints to try to place the page
117 * table memory in the same node or region.
118 */
119 static int __map_kernel_page(unsigned long ea, unsigned long pa,
120 pgprot_t flags,
121 unsigned int map_page_size,
122 int nid,
123 unsigned long region_start, unsigned long region_end)
124 {
125 unsigned long pfn = pa >> PAGE_SHIFT;
126 pgd_t *pgdp;
127 p4d_t *p4dp;
128 pud_t *pudp;
129 pmd_t *pmdp;
130 pte_t *ptep;
131 /*
132 * Make sure task size is correct as per the max addr
133 */
134 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
135
136 #ifdef CONFIG_PPC_64K_PAGES
137 BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
138 #endif
139
140 if (unlikely(!slab_is_available()))
141 return early_map_kernel_page(ea, pa, flags, map_page_size,
142 nid, region_start, region_end);
143
144 /*
145 * Should make page table allocation functions be able to take a
146 * node, so we can place kernel page tables on the right nodes after
147 * boot.
148 */
149 pgdp = pgd_offset_k(ea);
150 p4dp = p4d_offset(pgdp, ea);
151 pudp = pud_alloc(&init_mm, p4dp, ea);
152 if (!pudp)
153 return -ENOMEM;
154 if (map_page_size == PUD_SIZE) {
155 ptep = (pte_t *)pudp;
156 goto set_the_pte;
157 }
158 pmdp = pmd_alloc(&init_mm, pudp, ea);
159 if (!pmdp)
160 return -ENOMEM;
161 if (map_page_size == PMD_SIZE) {
162 ptep = pmdp_ptep(pmdp);
163 goto set_the_pte;
164 }
165 ptep = pte_alloc_kernel(pmdp, ea);
166 if (!ptep)
167 return -ENOMEM;
168
169 set_the_pte:
170 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
171 asm volatile("ptesync": : :"memory");
172 return 0;
173 }
174
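/*
 * Convenience wrapper around __map_kernel_page() with no node or region
 * hints. Illustrative use (hypothetical addresses), mapping a single base
 * page with kernel RW permissions:
 *
 *	radix__map_kernel_page(ea, __pa(ea), PAGE_KERNEL, PAGE_SIZE);
 */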
175 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
176 pgprot_t flags,
177 unsigned int map_page_size)
178 {
179 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
180 }
181
182 #ifdef CONFIG_STRICT_KERNEL_RWX
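/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every kernel
 * mapping in [start, end), then flush the kernel TLB range. Existing leaf
 * entries at the PUD/PMD level are updated in place.
 */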
183 void radix__change_memory_range(unsigned long start, unsigned long end,
184 unsigned long clear)
185 {
186 unsigned long idx;
187 pgd_t *pgdp;
188 p4d_t *p4dp;
189 pud_t *pudp;
190 pmd_t *pmdp;
191 pte_t *ptep;
192
193 start = ALIGN_DOWN(start, PAGE_SIZE);
194 end = PAGE_ALIGN(end); // aligns up
195
196 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
197 start, end, clear);
198
199 for (idx = start; idx < end; idx += PAGE_SIZE) {
200 pgdp = pgd_offset_k(idx);
201 p4dp = p4d_offset(pgdp, idx);
202 pudp = pud_alloc(&init_mm, p4dp, idx);
203 if (!pudp)
204 continue;
205 if (pud_is_leaf(*pudp)) {
206 ptep = (pte_t *)pudp;
207 goto update_the_pte;
208 }
209 pmdp = pmd_alloc(&init_mm, pudp, idx);
210 if (!pmdp)
211 continue;
212 if (pmd_is_leaf(*pmdp)) {
213 ptep = pmdp_ptep(pmdp);
214 goto update_the_pte;
215 }
216 ptep = pte_alloc_kernel(pmdp, idx);
217 if (!ptep)
218 continue;
219 update_the_pte:
220 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
221 }
222
223 radix__flush_tlb_kernel_range(start, end);
224 }
225
226 void radix__mark_rodata_ro(void)
227 {
228 unsigned long start, end;
229
230 start = (unsigned long)_stext;
231 end = (unsigned long)__init_begin;
232
233 radix__change_memory_range(start, end, _PAGE_WRITE);
234 }
235
236 void radix__mark_initmem_nx(void)
237 {
238 unsigned long start = (unsigned long)__init_begin;
239 unsigned long end = (unsigned long)__init_end;
240
241 radix__change_memory_range(start, end, _PAGE_EXEC);
242 }
243 #endif /* CONFIG_STRICT_KERNEL_RWX */
244
245 static inline void __meminit
246 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
247 {
248 char buf[10];
249
250 if (end <= start)
251 return;
252
253 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
254
255 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
256 exec ? " (exec)" : "");
257 }
258
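/*
 * With STRICT_KERNEL_RWX, stop a mapping at __init_begin so that the kernel
 * text/rodata below it is mapped separately from the memory above it and
 * can later have its permissions changed.
 */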
259 static unsigned long next_boundary(unsigned long addr, unsigned long end)
260 {
261 #ifdef CONFIG_STRICT_KERNEL_RWX
262 if (addr < __pa_symbol(__init_begin))
263 return __pa_symbol(__init_begin);
264 #endif
265 return end;
266 }
267
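/*
 * Map a physical address range into the kernel linear mapping, using the
 * largest page size (1G, 2M or base page) that the alignment, the remaining
 * gap and max_mapping_size allow. Ranges overlapping kernel text or the
 * interrupt vectors get PAGE_KERNEL_X; everything else uses _prot.
 */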
268 static int __meminit create_physical_mapping(unsigned long start,
269 unsigned long end,
270 unsigned long max_mapping_size,
271 int nid, pgprot_t _prot)
272 {
273 unsigned long vaddr, addr, mapping_size = 0;
274 bool prev_exec, exec = false;
275 pgprot_t prot;
276 int psize;
277
278 start = ALIGN(start, PAGE_SIZE);
279 end = ALIGN_DOWN(end, PAGE_SIZE);
280 for (addr = start; addr < end; addr += mapping_size) {
281 unsigned long gap, previous_size;
282 int rc;
283
284 gap = next_boundary(addr, end) - addr;
285 if (gap > max_mapping_size)
286 gap = max_mapping_size;
287 previous_size = mapping_size;
288 prev_exec = exec;
289
290 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
291 mmu_psize_defs[MMU_PAGE_1G].shift) {
292 mapping_size = PUD_SIZE;
293 psize = MMU_PAGE_1G;
294 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
295 mmu_psize_defs[MMU_PAGE_2M].shift) {
296 mapping_size = PMD_SIZE;
297 psize = MMU_PAGE_2M;
298 } else {
299 mapping_size = PAGE_SIZE;
300 psize = mmu_virtual_psize;
301 }
302
303 vaddr = (unsigned long)__va(addr);
304
305 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
306 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
307 prot = PAGE_KERNEL_X;
308 exec = true;
309 } else {
310 prot = _prot;
311 exec = false;
312 }
313
314 if (mapping_size != previous_size || exec != prev_exec) {
315 print_mapping(start, addr, previous_size, prev_exec);
316 start = addr;
317 }
318
319 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
320 if (rc)
321 return rc;
322
323 update_page_count(psize, 1);
324 }
325
326 print_mapping(start, addr, mapping_size, exec);
327 return 0;
328 }
329
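/*
 * Boot-time setup of the radix page tables: create the linear mapping for
 * all memblock ranges, size the PID space, allocate the process table and
 * reserve a guard PID for init_mm.
 */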
330 static void __init radix_init_pgtable(void)
331 {
332 unsigned long rts_field;
333 phys_addr_t start, end;
334 u64 i;
335
336 /* We don't support slb for radix */
337 mmu_slb_size = 0;
338
339 /*
340 * Create the linear mapping
341 */
342 for_each_mem_range(i, &start, &end) {
343 /*
344 * The memblock allocator is up at this point, so the
345 * page tables will be allocated within the range. No
346 * need for a node (which we don't have yet).
347 */
348
349 if (end >= RADIX_VMALLOC_START) {
350 pr_warn("Outside the supported range\n");
351 continue;
352 }
353
354 WARN_ON(create_physical_mapping(start, end,
355 radix_mem_block_size,
356 -1, PAGE_KERNEL));
357 }
358
359 /* Find out how many PID bits are supported */
360 if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
361 if (!mmu_pid_bits)
362 mmu_pid_bits = 20;
363 mmu_base_pid = 1;
364 } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
365 if (!mmu_pid_bits)
366 mmu_pid_bits = 20;
367 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
368 /*
369 * When KVM is possible, we only use the top half of the
370 * PID space to avoid collisions between host and guest PIDs
371 * which can cause problems due to prefetch when exiting the
372 * guest with AIL=3
373 */
374 mmu_base_pid = 1 << (mmu_pid_bits - 1);
375 #else
376 mmu_base_pid = 1;
377 #endif
378 } else {
379 /* The guest uses the bottom half of the PID space */
380 if (!mmu_pid_bits)
381 mmu_pid_bits = 19;
382 mmu_base_pid = 1;
383 }
384
385 /*
386 * Allocate Partition table and process table for the
387 * host.
388 */
389 BUG_ON(PRTB_SIZE_SHIFT > 36);
390 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
391 /*
392 * Fill in the process table.
393 */
394 rts_field = radix__get_tree_size();
395 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
396
397 /*
398 * The init_mm context is given the first available (non-zero) PID,
399 * which is the "guard PID" and contains no page table. PIDR should
400 * never be set to zero because that duplicates the kernel address
401 * space at the 0x0... offset (quadrant 0)!
402 *
403 * An arbitrary PID that may later be allocated by the PID allocator
404 * for userspace processes must not be used either, because that
405 * would cause stale user mappings for that PID on CPUs outside of
406 * the TLB invalidation scheme (because it won't be in mm_cpumask).
407 *
408 * So permanently carve out one PID for the purpose of a guard PID.
409 */
410 init_mm.context.id = mmu_base_pid;
411 mmu_base_pid++;
412 }
413
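/*
 * Install partition table entry 0 for the host: dw0 points at the kernel
 * radix tree, dw1 at the process table.
 */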
414 static void __init radix_init_partition_table(void)
415 {
416 unsigned long rts_field, dw0, dw1;
417
418 mmu_partition_table_init();
419 rts_field = radix__get_tree_size();
420 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
421 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
422 mmu_partition_table_set_entry(0, dw0, dw1, false);
423
424 pr_info("Initializing Radix MMU\n");
425 }
426
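/*
 * Translate a page size shift from the device tree (12, 16, 21, 30) into
 * the corresponding MMU_PAGE_* index, or -1 if unsupported.
 */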
427 static int __init get_idx_from_shift(unsigned int shift)
428 {
429 int idx = -1;
430
431 switch (shift) {
432 case 0xc:
433 idx = MMU_PAGE_4K;
434 break;
435 case 0x10:
436 idx = MMU_PAGE_64K;
437 break;
438 case 0x15:
439 idx = MMU_PAGE_2M;
440 break;
441 case 0x1e:
442 idx = MMU_PAGE_1G;
443 break;
444 }
445 return idx;
446 }
447
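/*
 * Scan "cpu" device tree nodes for the radix AP encodings and the supported
 * number of PID bits, and fill in mmu_psize_defs accordingly.
 */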
448 static int __init radix_dt_scan_page_sizes(unsigned long node,
449 const char *uname, int depth,
450 void *data)
451 {
452 int size = 0;
453 int shift, idx;
454 unsigned int ap;
455 const __be32 *prop;
456 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
457
458 /* We are scanning "cpu" nodes only */
459 if (type == NULL || strcmp(type, "cpu") != 0)
460 return 0;
461
462 /* Find MMU PID size */
463 prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
464 if (prop && size == 4)
465 mmu_pid_bits = be32_to_cpup(prop);
466
467 /* Grab page size encodings */
468 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
469 if (!prop)
470 return 0;
471
472 pr_info("Page sizes from device-tree:\n");
473 for (; size >= 4; size -= 4, ++prop) {
474
475 struct mmu_psize_def *def;
476
477 /* top 3 bits are the AP encoding */
478 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
479 ap = be32_to_cpu(prop[0]) >> 29;
480 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
481
482 idx = get_idx_from_shift(shift);
483 if (idx < 0)
484 continue;
485
486 def = &mmu_psize_defs[idx];
487 def->shift = shift;
488 def->ap = ap;
489 }
490
491 /* needed ? */
492 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
493 return 1;
494 }
495
496 #ifdef CONFIG_MEMORY_HOTPLUG
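/*
 * Read the LMB size from the ibm,dynamic-reconfiguration-memory node;
 * fall back to MIN_MEMORY_BLOCK_SIZE if the property is missing.
 */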
497 static int __init probe_memory_block_size(unsigned long node, const char *uname, int
498 depth, void *data)
499 {
500 unsigned long *mem_block_size = (unsigned long *)data;
501 const __be32 *prop;
502 int len;
503
504 if (depth != 1)
505 return 0;
506
507 if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
508 return 0;
509
510 prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
511
512 if (!prop || len < dt_root_size_cells * sizeof(__be32))
513 /*
514 * Nothing in the device tree
515 */
516 *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
517 else
518 *mem_block_size = of_read_number(prop, dt_root_size_cells);
519 return 1;
520 }
521
522 static unsigned long radix_memory_block_size(void)
523 {
524 unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
525
526 /*
527 * The OPAL firmware feature is set by now, so it is
528 * safe to test for it here.
529 */
530 if (firmware_has_feature(FW_FEATURE_OPAL))
531 mem_block_size = 1UL * 1024 * 1024 * 1024;
532 else
533 of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
534
535 return mem_block_size;
536 }
537
538 #else /* CONFIG_MEMORY_HOTPLUG */
539
540 static unsigned long radix_memory_block_size(void)
541 {
542 return 1UL * 1024 * 1024 * 1024;
543 }
544
545 #endif /* CONFIG_MEMORY_HOTPLUG */
546
547
548 void __init radix__early_init_devtree(void)
549 {
550 int rc;
551
552 /*
553 * Try to find the available page sizes in the device-tree
554 */
555 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
556 if (!rc) {
557 /*
558 * No page size details found in device tree.
559 * Let's assume we have page 4k and 64k support
560 */
561 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
562 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
563
564 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
565 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
566 }
567
568 /*
569 * Max mapping size used when mapping pages. We don't use
570 * ppc_md.memory_block_size() here because this gets called
571 * early, before the machine probe has run. Also, the
572 * pseries implementation only checks for ibm,lmb-size.
573 * All hypervisors supporting radix expose that device
574 * tree node.
575 */
576 radix_mem_block_size = radix_memory_block_size();
577 return;
578 }
579
580 static void radix_init_amor(void)
581 {
582 /*
583 * In HV mode, we init AMOR (Authority Mask Override Register) so that
584 * the hypervisor and guest can set up the IAMR (Instruction Authority Mask
585 * Register), enable key 0 and set it to 1.
586 *
587 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
588 */
589 mtspr(SPRN_AMOR, (3ul << 62));
590 }
591
592 #ifdef CONFIG_PPC_KUEP
593 void setup_kuep(bool disabled)
594 {
595 if (disabled || !early_radix_enabled())
596 return;
597
598 if (smp_processor_id() == boot_cpuid) {
599 pr_info("Activating Kernel Userspace Execution Prevention\n");
600 cur_cpu_spec->mmu_features |= MMU_FTR_KUEP;
601 }
602
603 /*
604 * Radix always uses key0 of the IAMR to determine if an access is
605 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
606 * fetch.
607 */
608 mtspr(SPRN_IAMR, (1ul << 62));
609 }
610 #endif
611
612 #ifdef CONFIG_PPC_KUAP
613 void setup_kuap(bool disabled)
614 {
615 if (disabled || !early_radix_enabled())
616 return;
617
618 if (smp_processor_id() == boot_cpuid) {
619 pr_info("Activating Kernel Userspace Access Prevention\n");
620 cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
621 }
622
623 /* Make sure userspace can't change the AMR */
624 mtspr(SPRN_UAMOR, 0);
625
626 /*
627 * Set the default kernel AMR values on all cpus.
628 */
629 mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
630 isync();
631 }
632 #endif
633
634 void __init radix__early_init_mmu(void)
635 {
636 unsigned long lpcr;
637
638 #ifdef CONFIG_PPC_64K_PAGES
639 /* PAGE_SIZE mappings */
640 mmu_virtual_psize = MMU_PAGE_64K;
641 #else
642 mmu_virtual_psize = MMU_PAGE_4K;
643 #endif
644
645 #ifdef CONFIG_SPARSEMEM_VMEMMAP
646 /* vmemmap mapping */
647 if (mmu_psize_defs[MMU_PAGE_2M].shift) {
648 /*
649 * map vmemmap using 2M if available
650 */
651 mmu_vmemmap_psize = MMU_PAGE_2M;
652 } else
653 mmu_vmemmap_psize = mmu_virtual_psize;
654 #endif
655 /*
656 * initialize page table size
657 */
658 __pte_index_size = RADIX_PTE_INDEX_SIZE;
659 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
660 __pud_index_size = RADIX_PUD_INDEX_SIZE;
661 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
662 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
663 __pte_table_size = RADIX_PTE_TABLE_SIZE;
664 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
665 __pud_table_size = RADIX_PUD_TABLE_SIZE;
666 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
667
668 __pmd_val_bits = RADIX_PMD_VAL_BITS;
669 __pud_val_bits = RADIX_PUD_VAL_BITS;
670 __pgd_val_bits = RADIX_PGD_VAL_BITS;
671
672 __kernel_virt_start = RADIX_KERN_VIRT_START;
673 __vmalloc_start = RADIX_VMALLOC_START;
674 __vmalloc_end = RADIX_VMALLOC_END;
675 __kernel_io_start = RADIX_KERN_IO_START;
676 __kernel_io_end = RADIX_KERN_IO_END;
677 vmemmap = (struct page *)RADIX_VMEMMAP_START;
678 ioremap_bot = IOREMAP_BASE;
679
680 #ifdef CONFIG_PCI
681 pci_io_base = ISA_IO_BASE;
682 #endif
683 __pte_frag_nr = RADIX_PTE_FRAG_NR;
684 __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
685 __pmd_frag_nr = RADIX_PMD_FRAG_NR;
686 __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
687
688 radix_init_pgtable();
689
690 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
691 lpcr = mfspr(SPRN_LPCR);
692 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
693 radix_init_partition_table();
694 radix_init_amor();
695 } else {
696 radix_init_pseries();
697 }
698
699 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
700
701 /* Switch to the guard PID before turning on MMU */
702 radix__switch_mmu_context(NULL, &init_mm);
703 tlbiel_all();
704 }
705
706 void radix__early_init_mmu_secondary(void)
707 {
708 unsigned long lpcr;
709 /*
710 * update partition table control register and UPRT
711 */
712 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
713 lpcr = mfspr(SPRN_LPCR);
714 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
715
716 set_ptcr_when_no_uv(__pa(partition_tb) |
717 (PATB_SIZE_SHIFT - 12));
718
719 radix_init_amor();
720 }
721
722 radix__switch_mmu_context(NULL, &init_mm);
723 tlbiel_all();
724 }
725
726 void radix__mmu_cleanup_all(void)
727 {
728 unsigned long lpcr;
729
730 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
731 lpcr = mfspr(SPRN_LPCR);
732 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
733 set_ptcr_when_no_uv(0);
734 powernv_set_nmmu_ptcr(0);
735 radix__flush_tlb_all();
736 }
737 }
738
739 #ifdef CONFIG_MEMORY_HOTPLUG
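/*
 * Page table teardown helpers for memory hot-unplug: each free_*_table()
 * frees a table page only once every entry in it is none, then clears the
 * entry in the level above.
 */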
740 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
741 {
742 pte_t *pte;
743 int i;
744
745 for (i = 0; i < PTRS_PER_PTE; i++) {
746 pte = pte_start + i;
747 if (!pte_none(*pte))
748 return;
749 }
750
751 pte_free_kernel(&init_mm, pte_start);
752 pmd_clear(pmd);
753 }
754
755 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
756 {
757 pmd_t *pmd;
758 int i;
759
760 for (i = 0; i < PTRS_PER_PMD; i++) {
761 pmd = pmd_start + i;
762 if (!pmd_none(*pmd))
763 return;
764 }
765
766 pmd_free(&init_mm, pmd_start);
767 pud_clear(pud);
768 }
769
770 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
771 {
772 pud_t *pud;
773 int i;
774
775 for (i = 0; i < PTRS_PER_PUD; i++) {
776 pud = pud_start + i;
777 if (!pud_none(*pud))
778 return;
779 }
780
781 pud_free(&init_mm, pud_start);
782 p4d_clear(p4d);
783 }
784
785 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
786 unsigned long end, bool direct)
787 {
788 unsigned long next, pages = 0;
789 pte_t *pte;
790
791 pte = pte_start + pte_index(addr);
792 for (; addr < end; addr = next, pte++) {
793 next = (addr + PAGE_SIZE) & PAGE_MASK;
794 if (next > end)
795 next = end;
796
797 if (!pte_present(*pte))
798 continue;
799
800 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
801 /*
802 * The vmemmap_free() and remove_section_mapping()
803 * codepaths call us with aligned addresses.
804 */
805 WARN_ONCE(1, "%s: unaligned range\n", __func__);
806 continue;
807 }
808
809 pte_clear(&init_mm, addr, pte);
810 pages++;
811 }
812 if (direct)
813 update_page_count(mmu_virtual_psize, -pages);
814 }
815
816 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
817 unsigned long end, bool direct)
818 {
819 unsigned long next, pages = 0;
820 pte_t *pte_base;
821 pmd_t *pmd;
822
823 pmd = pmd_start + pmd_index(addr);
824 for (; addr < end; addr = next, pmd++) {
825 next = pmd_addr_end(addr, end);
826
827 if (!pmd_present(*pmd))
828 continue;
829
830 if (pmd_is_leaf(*pmd)) {
831 if (!IS_ALIGNED(addr, PMD_SIZE) ||
832 !IS_ALIGNED(next, PMD_SIZE)) {
833 WARN_ONCE(1, "%s: unaligned range\n", __func__);
834 continue;
835 }
836 pte_clear(&init_mm, addr, (pte_t *)pmd);
837 pages++;
838 continue;
839 }
840
841 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
842 remove_pte_table(pte_base, addr, next, direct);
843 free_pte_table(pte_base, pmd);
844 }
845 if (direct)
846 update_page_count(MMU_PAGE_2M, -pages);
847 }
848
849 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
850 unsigned long end, bool direct)
851 {
852 unsigned long next, pages = 0;
853 pmd_t *pmd_base;
854 pud_t *pud;
855
856 pud = pud_start + pud_index(addr);
857 for (; addr < end; addr = next, pud++) {
858 next = pud_addr_end(addr, end);
859
860 if (!pud_present(*pud))
861 continue;
862
863 if (pud_is_leaf(*pud)) {
864 if (!IS_ALIGNED(addr, PUD_SIZE) ||
865 !IS_ALIGNED(next, PUD_SIZE)) {
866 WARN_ONCE(1, "%s: unaligned range\n", __func__);
867 continue;
868 }
869 pte_clear(&init_mm, addr, (pte_t *)pud);
870 pages++;
871 continue;
872 }
873
874 pmd_base = pud_pgtable(*pud);
875 remove_pmd_table(pmd_base, addr, next, direct);
876 free_pmd_table(pmd_base, pud);
877 }
878 if (direct)
879 update_page_count(MMU_PAGE_1G, -pages);
880 }
881
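/*
 * Unmap a kernel virtual range and free any page table pages that become
 * empty, under init_mm's page_table_lock, then flush the TLB. 'direct'
 * indicates the linear mapping (so mapped page counts are adjusted) as
 * opposed to a vmemmap mapping.
 */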
882 static void __meminit remove_pagetable(unsigned long start, unsigned long end,
883 bool direct)
884 {
885 unsigned long addr, next;
886 pud_t *pud_base;
887 pgd_t *pgd;
888 p4d_t *p4d;
889
890 spin_lock(&init_mm.page_table_lock);
891
892 for (addr = start; addr < end; addr = next) {
893 next = pgd_addr_end(addr, end);
894
895 pgd = pgd_offset_k(addr);
896 p4d = p4d_offset(pgd, addr);
897 if (!p4d_present(*p4d))
898 continue;
899
900 if (p4d_is_leaf(*p4d)) {
901 if (!IS_ALIGNED(addr, P4D_SIZE) ||
902 !IS_ALIGNED(next, P4D_SIZE)) {
903 WARN_ONCE(1, "%s: unaligned range\n", __func__);
904 continue;
905 }
906
907 pte_clear(&init_mm, addr, (pte_t *)pgd);
908 continue;
909 }
910
911 pud_base = p4d_pgtable(*p4d);
912 remove_pud_table(pud_base, addr, next, direct);
913 free_pud_table(pud_base, p4d);
914 }
915
916 spin_unlock(&init_mm.page_table_lock);
917 radix__flush_tlb_kernel_range(start, end);
918 }
919
920 int __meminit radix__create_section_mapping(unsigned long start,
921 unsigned long end, int nid,
922 pgprot_t prot)
923 {
924 if (end >= RADIX_VMALLOC_START) {
925 pr_warn("Outside the supported range\n");
926 return -1;
927 }
928
929 return create_physical_mapping(__pa(start), __pa(end),
930 radix_mem_block_size, nid, prot);
931 }
932
933 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
934 {
935 remove_pagetable(start, end, true);
936 return 0;
937 }
938 #endif /* CONFIG_MEMORY_HOTPLUG */
939
940 #ifdef CONFIG_SPARSEMEM_VMEMMAP
941 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
942 pgprot_t flags, unsigned int map_page_size,
943 int nid)
944 {
945 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
946 }
947
948 int __meminit radix__vmemmap_create_mapping(unsigned long start,
949 unsigned long page_size,
950 unsigned long phys)
951 {
952 /* Create a PTE encoding */
953 unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
954 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
955 int ret;
956
957 if ((start + page_size) >= RADIX_VMEMMAP_END) {
958 pr_warn("Outside the supported range\n");
959 return -1;
960 }
961
962 ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
963 BUG_ON(ret);
964
965 return 0;
966 }
967
968 #ifdef CONFIG_MEMORY_HOTPLUG
969 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
970 {
971 remove_pagetable(start, start + page_size, false);
972 }
973 #endif
974 #endif
975
976 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
977
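/*
 * Update a huge-page PMD by clearing 'clr' and setting 'set' bits, returning
 * the old PMD value. Expects the PMD lock to be held and the entry to be a
 * transparent huge page (or devmap) PMD.
 */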
978 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
979 pmd_t *pmdp, unsigned long clr,
980 unsigned long set)
981 {
982 unsigned long old;
983
984 #ifdef CONFIG_DEBUG_VM
985 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
986 assert_spin_locked(pmd_lockptr(mm, pmdp));
987 #endif
988
989 old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
990 trace_hugepage_update(addr, old, clr, set);
991
992 return old;
993 }
994
995 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
996 pmd_t *pmdp)
997
998 {
999 pmd_t pmd;
1000
1001 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1002 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
1003 VM_BUG_ON(pmd_devmap(*pmdp));
1004 /*
1005 * khugepaged calls this for a normal (non-huge) pmd
1006 */
1007 pmd = *pmdp;
1008 pmd_clear(pmdp);
1009
1010 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1011
1012 return pmd;
1013 }
1014
1015 /*
1016 * For us pgtable_t is pte_t *. In order to save the deposited
1017 * page table, we consider the allocated page table as a list
1018 * head. On withdraw we need to make sure we zero out the used
1019 * list_head memory area.
1020 */
1021 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1022 pgtable_t pgtable)
1023 {
1024 struct list_head *lh = (struct list_head *) pgtable;
1025
1026 assert_spin_locked(pmd_lockptr(mm, pmdp));
1027
1028 /* FIFO */
1029 if (!pmd_huge_pte(mm, pmdp))
1030 INIT_LIST_HEAD(lh);
1031 else
1032 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1033 pmd_huge_pte(mm, pmdp) = pgtable;
1034 }
1035
1036 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1037 {
1038 pte_t *ptep;
1039 pgtable_t pgtable;
1040 struct list_head *lh;
1041
1042 assert_spin_locked(pmd_lockptr(mm, pmdp));
1043
1044 /* FIFO */
1045 pgtable = pmd_huge_pte(mm, pmdp);
1046 lh = (struct list_head *) pgtable;
1047 if (list_empty(lh))
1048 pmd_huge_pte(mm, pmdp) = NULL;
1049 else {
1050 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1051 list_del(lh);
1052 }
1053 ptep = (pte_t *) pgtable;
1054 *ptep = __pte(0);
1055 ptep++;
1056 *ptep = __pte(0);
1057 return pgtable;
1058 }
1059
1060 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1061 unsigned long addr, pmd_t *pmdp)
1062 {
1063 pmd_t old_pmd;
1064 unsigned long old;
1065
1066 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1067 old_pmd = __pmd(old);
1068 return old_pmd;
1069 }
1070
1071 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1072
1073 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1074 pte_t entry, unsigned long address, int psize)
1075 {
1076 struct mm_struct *mm = vma->vm_mm;
1077 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
1078 _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
1079
1080 unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1081 /*
1082 * To avoid an NMMU hang while relaxing access, we need to mark
1083 * the pte invalid in between.
1084 */
1085 if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
1086 unsigned long old_pte, new_pte;
1087
1088 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1089 /*
1090 * new value of pte
1091 */
1092 new_pte = old_pte | set;
1093 radix__flush_tlb_page_psize(mm, address, psize);
1094 __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1095 } else {
1096 __radix_pte_update(ptep, 0, set);
1097 /*
1098 * Book3S does not require a TLB flush when relaxing access
1099 * restrictions when the address space is not attached to a
1100 * NMMU, because the core MMU will reload the pte after taking
1101 * an access fault, which is defined by the architecture.
1102 */
1103 }
1104 /* See ptesync comment in radix__set_pte_at */
1105 }
1106
1107 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1108 unsigned long addr, pte_t *ptep,
1109 pte_t old_pte, pte_t pte)
1110 {
1111 struct mm_struct *mm = vma->vm_mm;
1112
1113 /*
1114 * To avoid an NMMU hang while relaxing access, we need to flush the TLB before
1115 * we set the new value. We need to do this only for radix, because hash
1116 * translation does flush when updating the linux pte.
1117 */
1118 if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1119 (atomic_read(&mm->context.copros) > 0))
1120 radix__flush_tlb_page(vma, addr);
1121
1122 set_pte_at(mm, addr, ptep, pte);
1123 }
1124
1125 int __init arch_ioremap_pud_supported(void)
1126 {
1127 /* HPT does not cope with large pages in the vmalloc area */
1128 return radix_enabled();
1129 }
1130
1131 int __init arch_ioremap_pmd_supported(void)
1132 {
1133 return radix_enabled();
1134 }
1135
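/*
 * We never free a PUD-level table from a P4D entry here; returning 0 tells
 * the caller that nothing was freed.
 */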
1136 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
1137 {
1138 return 0;
1139 }
1140
1141 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1142 {
1143 pte_t *ptep = (pte_t *)pud;
1144 pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1145
1146 if (!radix_enabled())
1147 return 0;
1148
1149 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1150
1151 return 1;
1152 }
1153
1154 int pud_clear_huge(pud_t *pud)
1155 {
1156 if (pud_is_leaf(*pud)) {
1157 pud_clear(pud);
1158 return 1;
1159 }
1160
1161 return 0;
1162 }
1163
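/*
 * Tear down the PMD table mapped by *pud: clear the PUD entry, flush the
 * covered kernel range, then free any remaining PTE tables and the PMD page.
 */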
1164 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1165 {
1166 pmd_t *pmd;
1167 int i;
1168
1169 pmd = pud_pgtable(*pud);
1170 pud_clear(pud);
1171
1172 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1173
1174 for (i = 0; i < PTRS_PER_PMD; i++) {
1175 if (!pmd_none(pmd[i])) {
1176 pte_t *pte;
1177 pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1178
1179 pte_free_kernel(&init_mm, pte);
1180 }
1181 }
1182
1183 pmd_free(&init_mm, pmd);
1184
1185 return 1;
1186 }
1187
1188 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1189 {
1190 pte_t *ptep = (pte_t *)pmd;
1191 pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1192
1193 if (!radix_enabled())
1194 return 0;
1195
1196 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1197
1198 return 1;
1199 }
1200
1201 int pmd_clear_huge(pmd_t *pmd)
1202 {
1203 if (pmd_is_leaf(*pmd)) {
1204 pmd_clear(pmd);
1205 return 1;
1206 }
1207
1208 return 0;
1209 }
1210
1211 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1212 {
1213 pte_t *pte;
1214
1215 pte = (pte_t *)pmd_page_vaddr(*pmd);
1216 pmd_clear(pmd);
1217
1218 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1219
1220 pte_free_kernel(&init_mm, pte);
1221
1222 return 1;
1223 }
1224
1225 int __init arch_ioremap_p4d_supported(void)
1226 {
1227 return 0;
1228 }
1229