/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE            __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED          __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
                                      _page_cachable_default)
#define PAGE_COPY            __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
                                      _page_cachable_default)
#define PAGE_READONLY        __pgprot(_PAGE_PRESENT | _PAGE_READ | \
                                      _page_cachable_default)
#define PAGE_KERNEL          __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                      _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC       __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
                                      _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO          __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
                                      _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
                                      __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
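
/*
 * Illustrative sketch (not part of this header): the dummy table above
 * is filled in at boot, roughly the way setup_protection_map() in
 * arch/mips/mm/cache.c does it, e.g.:
 *
 *        protection_map[0]  = PAGE_NONE;
 *        protection_map[1]  = PAGE_READONLY;
 *        ...
 *        protection_map[15] = PAGE_SHARED;
 *
 * The exact pgprot values are computed at runtime because they depend
 * on CPU features such as the RI/XI bits and the default cache mode.
 */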

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
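
/*
 * Example (illustrative, assuming 4KB pages and four cache colours,
 * i.e. zero_page_mask == 0x3000): ZERO_PAGE(vaddr) returns the zero
 * page whose cache colour matches vaddr, so
 *
 *        struct page *zp = ZERO_PAGE(0x2000);
 *
 * yields the third of the four replicated zero pages, avoiding
 * virtual-aliasing conflicts when userspace reads the mapping.
 */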

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)       virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)     (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)       __pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd) pmd_val(pmd)

#define htw_stop()                                                      \
do {                                                                    \
        unsigned long flags;                                            \
                                                                        \
        if (cpu_has_htw) {                                              \
                local_irq_save(flags);                                  \
                if (!raw_current_cpu_data.htw_seq++) {                  \
                        write_c0_pwctl(read_c0_pwctl() &                \
                                       ~(1 << MIPS_PWCTL_PWEN_SHIFT));  \
                        back_to_back_c0_hazard();                       \
                }                                                       \
                local_irq_restore(flags);                               \
        }                                                               \
} while(0)

#define htw_start()                                                     \
do {                                                                    \
        unsigned long flags;                                            \
                                                                        \
        if (cpu_has_htw) {                                              \
                local_irq_save(flags);                                  \
                if (!--raw_current_cpu_data.htw_seq) {                  \
                        write_c0_pwctl(read_c0_pwctl() |                \
                                       (1 << MIPS_PWCTL_PWEN_SHIFT));   \
                        back_to_back_c0_hazard();                       \
                }                                                       \
                local_irq_restore(flags);                               \
        }                                                               \
} while(0)
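
/*
 * Typical usage (sketch): the hardware page-table walker is stopped
 * around PTE updates so it never observes a half-written entry, e.g.:
 *
 *        htw_stop();
 *        set_pte_at(mm, addr, ptep, pteval);
 *        htw_start();
 *
 * raw_current_cpu_data.htw_seq counts the nesting depth, so stop/start
 * pairs may nest safely.
 */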

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)     (!(((pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)  ((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        ptep->pte_high = pte.pte_high;
        smp_wmb();
        ptep->pte_low = pte.pte_low;

        if (pte.pte_high & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
                if (pte_none(*buddy))
                        buddy->pte_high |= _PAGE_GLOBAL;
        }
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t null = __pte(0);

        htw_stop();
        /* Preserve global status for the pair */
        if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
                null.pte_high = _PAGE_GLOBAL;

        set_pte_at(mm, addr, ptep, null);
        htw_start();
}
#else

#define pte_none(pte)     (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)  (pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        if (pte_val(pteval) & _PAGE_GLOBAL) {
                pte_t *buddy = ptep_buddy(ptep);
                /*
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
#ifdef CONFIG_SMP
                /*
                 * For SMP, multiple CPUs can race, so we need to do
                 * this atomically.
                 */
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
                unsigned long page_global = _PAGE_GLOBAL;
                unsigned long tmp;

                __asm__ __volatile__ (
                        "       .set    push\n"
                        "       .set    noreorder\n"
                        "1:     " LL_INSN "     %[tmp], %[buddy]\n"
                        "       bnez    %[tmp], 2f\n"
                        "       or      %[tmp], %[tmp], %[global]\n"
                        "       " SC_INSN "     %[tmp], %[buddy]\n"
                        "       beqz    %[tmp], 1b\n"
                        "       nop\n"
                        "2:\n"
                        "       .set    pop"
                        : [buddy] "+m" (buddy->pte),
                          [tmp] "=&r" (tmp)
                        : [global] "r" (page_global));
#else /* !CONFIG_SMP */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
        }
#endif
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
                set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
        else
#endif
                set_pte_at(mm, addr, ptep, __pte(0));
        htw_start();
}
#endif

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with its size, but gcc 3.3 and older
 * are not able to see that this expression is a constant, so the size
 * is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)  { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)  { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)  { return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)   { return pte.pte_low & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_WRITE;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_MODIFIED;
        pte.pte_high &= ~_PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte.pte_low  &= ~_PAGE_ACCESSED;
        pte.pte_high &= ~_PAGE_SILENT_READ;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte.pte_low |= _PAGE_WRITE;
        if (pte.pte_low & _PAGE_MODIFIED)
                pte.pte_high |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte.pte_low |= _PAGE_MODIFIED;
        if (pte.pte_low & _PAGE_WRITE)
                pte.pte_high |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte.pte_low |= _PAGE_ACCESSED;
        if (pte.pte_low & _PAGE_READ)
                pte.pte_high |= _PAGE_SILENT_READ;
        return pte;
}
#else
static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)   { return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        if (pte_val(pte) & _PAGE_MODIFIED)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_MODIFIED;
        if (pte_val(pte) & _PAGE_WRITE)
                pte_val(pte) |= _PAGE_SILENT_WRITE;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
#ifdef CONFIG_CPU_MIPSR2
        if (!(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        else
#endif
        if (pte_val(pte) & _PAGE_READ)
                pte_val(pte) |= _PAGE_SILENT_READ;
        return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)   { return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)        { return 0; }
static inline pte_t pte_mkspecial(pte_t pte)    { return pte; }

/*
 * Macro to mark a page protection value as "uncacheable". Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

        return __pgprot(prot);
}
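
/*
 * Example (illustrative): a driver mapping device registers into user
 * space would typically strip the cacheable attribute first:
 *
 *        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *        return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *                               vma->vm_page_prot);
 */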

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        /* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
        prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

        return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
        pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
        return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif
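
/*
 * Sketch of a caller: when mprotect() changes the protection of an
 * existing mapping, the generic code rewrites each entry with
 *
 *        pte = pte_modify(pte, vma->vm_page_prot);
 *
 * so the pfn and the state bits covered by _PAGE_CHG_MASK survive
 * while the protection bits are replaced.
 */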

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
        pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
        unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        __update_tlb(vma, address, pte);
        __update_cache(vma, address, pte);
}
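
/*
 * Sketch of the call site: the generic fault path invokes this after a
 * PTE has been installed, e.g. at the end of handle_pte_fault():
 *
 *        update_mmu_cache(vma, address, ptep);
 *
 * so the new translation can be preloaded into the TLB and any cache
 * aliases resolved before the faulting access is retried.
 */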

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
        unsigned long address, pmd_t *pmdp)
{
        pte_t pte = *(pte_t *)pmdp;

        __update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)   (1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
        unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
        unsigned long vaddr,
        unsigned long pfn,
        unsigned long size,
        pgprot_t prot)
{
        phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
        return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_HUGE;

        return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_SPLITTING;

        return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
        unsigned long address,
        pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_WRITE;
        if (pmd_val(pmd) & _PAGE_MODIFIED)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_MODIFIED;
        if (pmd_val(pmd) & _PAGE_WRITE)
                pmd_val(pmd) |= _PAGE_SILENT_WRITE;

        return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pmd_val(pmd) |= _PAGE_ACCESSED;

#ifdef CONFIG_CPU_MIPSR2
        if (!(pmd_val(pmd) & _PAGE_NO_READ))
                pmd_val(pmd) |= _PAGE_SILENT_READ;
        else
#endif
        if (pmd_val(pmd) & _PAGE_READ)
                pmd_val(pmd) |= _PAGE_SILENT_READ;

        return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd))
                return pfn_to_page(pmd_pfn(pmd));

        return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
                       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

        return pmd;
}

/*
 * The generic version of pmdp_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
        unsigned long address, pmd_t *pmdp)
{
        pmd_t old = *pmdp;

        pmd_clear(pmdp);

        return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * Uncached accelerated TLB map for video memory access.
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()    do { } while (0)

#endif /* _ASM_PGTABLE_H */