/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
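
/*
 * Editorial note: Book3S parts (hash or radix MMU) and the
 * software-loaded-TLB "nohash" parts (e.g. 8xx, e500) use different
 * PTE layouts, so the per-family header selected above provides most
 * of the primitives the rest of this file builds on.
 */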

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write-protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Keep module code happy; we don't set it RO yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
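
/*
 * Illustrative sketch (editorial; not part of this header): early setup
 * code could apply the protection chosen above when mapping kernel
 * text, e.g. with powerpc's map_kernel_page() helper (its availability
 * at a given call site is assumed here):
 *
 *	err = map_kernel_page(va, pa, PAGE_KERNEL_TEXT);
 */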

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

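/*
 * Editorial note: set_ptes() installs @nr PTEs covering @nr physically
 * contiguous pages in one call; the generic set_pte_at() is the nr == 1
 * case. Generic helpers step the PFN field from one entry to the next,
 * which is why PFN_PTE_SHIFT must name the PFN offset within a PTE.
 * Caller sketch (illustrative names):
 *
 *	set_ptes(vma->vm_mm, addr, ptep, mk_pte(page, prot), 1);
 */
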
#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}
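
/*
 * Editorial note: pte_pfn() and pte_pgprot() split a PTE into disjoint
 * parts, so recombining them with the subarch pfn_pte() rebuilds the
 * original entry (an identity, assuming the usual pfn_pte() layout):
 *
 *	pte_t copy = pfn_pte(pte_pfn(pte), pte_pgprot(pte));
 *
 * Likewise pte_page(mk_pte(page, prot)) == page via the macros above.
 */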

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
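
/*
 * Editorial note: the vaddr argument is ignored because powerpc keeps a
 * single shared zero page; generic mm code can map it (copy-on-write)
 * to satisfy read faults on anonymous memory.
 */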

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When used, PTE_FRAG_NR is defined in the subarch pgtable.h,
 * so we can be sure it has been included by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif
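
/*
 * Editorial note: with PTE_FRAG_NR > 1, one page is carved into
 * PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes, each holding a PTE
 * table; pte_frag_get()/pte_frag_set() cache the current partly-used
 * fragment in the mm context. For example (values assumed), a 64K page
 * split into 4K fragments gives PTE_FRAG_NR == 16. The fallback above
 * degenerates to one whole page per table.
 */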

#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif
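
/*
 * Editorial note: a "leaf" entry maps a (huge) page directly at that
 * level instead of pointing to a lower-level table. The stubs above are
 * the "no huge pages at this level" defaults; subarch headers override
 * them where leaf entries exist. Walker sketch (illustrative):
 *
 *	if (pmd_is_leaf(*pmdp))
 *		return ...;	(huge mapping, no PTE level below)
 */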

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory() goes into the
 * details of some of the restrictions. We don't check for PMD_SIZE
 * because our vmemmap allocation code can fall back correctly. The
 * pageblock alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and a 2M PMD_SIZE, things align well with
	 * memory block sizes of 128MB and up, so require PMD_SIZE
	 * alignment here.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}
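
/*
 * Worked example (editorial, values assumed): a 128MB memory block with
 * 4K pages contains 32768 pages; at the common 64 bytes per struct page
 * that is a 2MB vmemmap, which satisfies the PMD_SIZE (2MB with 4K
 * pages on radix) alignment check above.
 */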

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */