// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains pgtable related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/prom.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

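/*
 * Note: on Book3S 64 the page table geometry is not a compile-time constant.
 * The variables below are filled in at boot by the hash or radix MMU setup
 * code, so that common code can share a single set of accessors.
 */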
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

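/*
 * The p4d/pud/pmd_page() helpers below return the struct page backing an
 * entry: either the page of a leaf (huge) mapping, or the page holding the
 * next-level table that the entry points to.
 */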
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *p4d_page(p4d_t p4d)
{
	if (p4d_is_leaf(p4d)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!p4d_huge(p4d));
		return pte_page(p4d_pte(p4d));
	}
	return virt_to_page(p4d_page_vaddr(p4d));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage, the pmd holds the pfn; the low PTE_RPN_SHIFT bits are
 * used for flags.
 * For a PTE page, the pmd holds a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		/*
		 * vmalloc_to_page may be called on any vmap address (not only
		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
		 * enabled so these checks can't be used.
		 */
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

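/*
 * With CONFIG_STRICT_KERNEL_RWX, generic init code calls mark_rodata_ro()
 * late in boot to write-protect kernel rodata; the actual work is delegated
 * to the radix or hash MMU backend.
 */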
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif