/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

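/*
 * Basic page protection encodings. The cacheability field is not fixed at
 * compile time: _page_cachable_default is filled in at boot with the cache
 * coherency attribute preferred by the running CPU.
 */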
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
                           _page_cachable_default)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                             _page_cachable_default)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
                           _page_cachable_default)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | \
                               _page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                             _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                             _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
                                      __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't enforce execute protection, so
 * execute permission is treated the same as read. Also, write permissions
 * imply read permissions. This is the closest we can get by reasonable
 * means.
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

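/*
 * zero_page_mask selects between several identically zeroed pages so that
 * the page handed out matches the cache colour of the faulting virtual
 * address on CPUs with aliasing, virtually indexed caches.
 */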
#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd) __pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd) pmd_val(pmd)

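/*
 * htw_stop()/htw_start() disable and re-enable the hardware page table
 * walker (the PWEn bit in the CP0 PWCtl register) around software page
 * table updates. The per-CPU htw_seq nesting count ensures the walker is
 * only toggled on the outermost stop/start pair.
 */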
#define htw_stop() \
do { \
        unsigned long flags; \
 \
        if (cpu_has_htw) { \
                local_irq_save(flags); \
                if (!raw_current_cpu_data.htw_seq++) { \
                        write_c0_pwctl(read_c0_pwctl() & \
                                       ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
                        back_to_back_c0_hazard(); \
                } \
                local_irq_restore(flags); \
        } \
} while (0)

#define htw_start() \
do { \
        unsigned long flags; \
 \
        if (cpu_has_htw) { \
                local_irq_save(flags); \
                if (!--raw_current_cpu_data.htw_seq) { \
                        write_c0_pwctl(read_c0_pwctl() | \
                                       (1 << MIPS_PWCTL_PWEN_SHIFT)); \
                        back_to_back_c0_hazard(); \
                } \
                local_irq_restore(flags); \
        } \
} while (0)

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
        if (pte.pte_high & _PAGE_GLOBAL) {
#else
        if (pte.pte_low & _PAGE_GLOBAL) {
#endif
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
                if (pte_none(*buddy)) {
                        if (!IS_ENABLED(CONFIG_XPA))
                                buddy->pte_low |= _PAGE_GLOBAL;
                        buddy->pte_high |= _PAGE_GLOBAL;
                }
        }
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t null = __pte(0);

        htw_stop();
        /* Preserve global status for the pair */
        if (IS_ENABLED(CONFIG_XPA)) {
                if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
                        null.pte_high = _PAGE_GLOBAL;
        } else {
                if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                        null.pte_low = null.pte_high = _PAGE_GLOBAL;
        }

        set_pte_at(mm, addr, ptep, null);
        htw_start();
}
#else

#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when PTEs within a page
 * table are directly modified. Thus, the following hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
        if (pte_val(pteval) & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
                cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
                cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
        }
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
        else
#endif
                set_pte_at(mm, addr, ptep, __pte(0));
        htw_start();
}
#endif

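/*
 * set_pte_at() only needs to synchronise the caches when a present mapping
 * to a new page frame is being installed; clearing a PTE or rewriting the
 * same pfn goes straight to the PTE update.
 */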
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        extern void __update_cache(unsigned long address, pte_t pte);

        if (!pte_present(pteval))
                goto cache_sync_done;

        if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
                goto cache_sync_done;

        __update_cache(addr, pteval);
cache_sync_done:
        set_pte(ptep, pteval);
}

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

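/* log2 of sizeof(pgd_t)/sizeof(pmd_t)/sizeof(pte_t), for code that needs shift amounts rather than sizes. */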
#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with a size, but gcc 3.3 and older were not
 * able to see that the size expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte.pte_low & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte.pte_low &= ~_PAGE_WRITE;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte.pte_low &= ~_PAGE_MODIFIED;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte.pte_low &= ~_PAGE_ACCESSED;
        if (!IS_ENABLED(CONFIG_XPA))
                pte.pte_low &= ~_PAGE_SILENT_READ;
        pte.pte_high &= ~_PAGE_SILENT_READ;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte.pte_low |= _PAGE_WRITE;
        if (pte.pte_low & _PAGE_MODIFIED) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte.pte_low |= _PAGE_MODIFIED;
        if (pte.pte_low & _PAGE_WRITE) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_WRITE;
                pte.pte_high |= _PAGE_SILENT_WRITE;
        }
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte.pte_low |= _PAGE_ACCESSED;
        if (!(pte.pte_low & _PAGE_NO_READ)) {
                if (!IS_ENABLED(CONFIG_XPA))
                        pte.pte_low |= _PAGE_SILENT_READ;
                pte.pte_high |= _PAGE_SILENT_READ;
        }
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte.pte_low |= _PAGE_SPECIAL;
        return pte;
}
#else
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        if (!(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif

/*
 * Macro to mark a page protection value as "uncacheable". Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
        prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

        return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

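/*
 * pte_modify() rebuilds a pte with new protection bits while preserving the
 * physical frame number and the accessed/dirty state of the old entry.
 */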
#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low &= _PAGE_CHG_MASK;
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low |= pgprot_val(newprot);
        pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
                     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
                         pte_t pte);

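/*
 * update_mmu_cache() is called by the generic MM code after a PTE has been
 * installed; here it is used to preload the new translation into the TLB
 * via __update_tlb().
 */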
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp)
{
        pte_t pte = *(pte_t *)pmdp;

        __update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr) (1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

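/*
 * With 64-bit physical addresses, let the platform's fixup_bigphys_addr()
 * adjust the target physical address before the range is remapped.
 */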
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long vaddr,
                                     unsigned long pfn,
                                     unsigned long size,
                                     pgprot_t prot)
{
        phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
        return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits; generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

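/*
 * A leaf pmd that maps a huge page is marked with _PAGE_HUGE; the pmd_*
 * helpers below mirror the pte_* accessors above.
 */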
static inline int pmd_trans_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_HUGE;

        return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_WRITE;
        if (pmd_val(pmd) & _PAGE_MODIFIED)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_MODIFIED;
        if (pmd_val(pmd) & _PAGE_WRITE)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_ACCESSED;

        if (!(pmd_val(pmd) & _PAGE_NO_READ))
                pmd_val(pmd) |= _PAGE_SILENT_READ;

        return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));

        return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
                       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

        return pmd;
}

/*
 * The generic pmdp_huge_get_and_clear() relies on a pmd_clear() with a
 * different prototype, so provide our own version.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long address, pmd_t *pmdp)
{
        pmd_t old = *pmdp;

        pmd_clear(pmdp);

        return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

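/* The fast get_user_pages() path is only permitted on CPUs without dcache aliasing. */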
#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */