/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/pgtable-bits.h>
#include <asm/pgtable-64.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
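/*
 * Note that the vaddr argument is ignored: every caller gets the one
 * shared empty_zero_page. An illustrative (not LoongArch-specific) use
 * is satisfying a read fault on anonymous memory with a read-only
 * mapping of the zero page, roughly:
 *
 *	entry = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(addr)),
 *				      vma->vm_page_prot));
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 */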

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__(
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}
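/*
 * Background: ptep_buddy() returns the other PTE of an even/odd pair.
 * The presumption (inherited from the MIPS scheme this follows) is that
 * both PTEs of a pair end up in a single TLB entry, so their global (G)
 * attributes must agree. set_pte() therefore propagates _PAGE_GLOBAL to
 * an empty buddy, and pte_clear() below leaves _PAGE_GLOBAL behind when
 * the buddy is still global.
 */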

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
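/*
 * These evaluate to log2 of the entry size. For example, with 8-byte
 * table entries sizeof(pte_t) == 8 and __builtin_ffs(8) == 4, so
 * PTE_T_LOG2 == 3: a table index can be turned into a byte offset by
 * shifting left by PTE_T_LOG2.
 */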

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
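/*
 * Dirty state is tracked with two bits: _PAGE_DIRTY (the hardware D bit,
 * which is what actually permits stores) and _PAGE_MODIFIED (a software
 * record that the page has been written). pte_mkdirty() only sets
 * _PAGE_DIRTY when _PAGE_WRITE is present, so a write-protected PTE
 * never becomes hardware-writable, and pte_mkwrite() re-enables
 * _PAGE_DIRTY if a write was already recorded in _PAGE_MODIFIED.
 */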

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
	    mm_tlb_flush_pending(mm))
		return true;

	return false;
}
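/*
 * A PROT_NONE PTE is not hardware-present, but while a TLB flush is
 * still pending other CPUs may hold a stale, accessible copy of the old
 * translation, so the PTE must be treated as accessible until the flush
 * completes. This mirrors the x86 pte_accessible() logic.
 */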

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
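/*
 * pte_modify() keeps the bits covered by _PAGE_CHG_MASK (the PFN plus
 * the state bits listed in pgtable-bits.h) and takes everything else,
 * i.e. the protection bits, from newprot.
 */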

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
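/*
 * _PAGE_HUGE occupies the bit position that holds _PAGE_GLOBAL in a
 * base-size PTE (see pgtable-bits.h), so when a PMD is turned into a
 * huge-page entry its global flag is relocated to the separate
 * _PAGE_HGLOBAL position before _PAGE_HUGE is set.
 */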

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}
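/*
 * pmd_mkinvalid() is used when a huge PMD is temporarily invalidated
 * (e.g. by pmdp_invalidate() during THP splitting): the hardware-visible
 * present/valid/dirty bits are cleared so no new translation can be
 * loaded, while _PAGE_PRESENT_INVALID keeps the entry distinguishable
 * from a truly cleared PMD.
 */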

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
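/*
 * Leaf predicates for generic page-table walkers: an entry with
 * _PAGE_HUGE set maps a block of memory directly at that level rather
 * than pointing to a lower-level table.
 */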

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */