#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */


#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>
#include <linux/quicklist.h>

#include <asm/mmu_context.h>
pgd_alloc(struct mm_struct * mm)25 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
26 {
27 	return quicklist_alloc(0, GFP_KERNEL, NULL);
28 }
29 
/* Return a pgd page previously obtained from pgd_alloc() to quicklist 0. */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, NULL, pgd);
}
34 
35 #if CONFIG_PGTABLE_LEVELS == 4
36 static inline void
pgd_populate(struct mm_struct * mm,pgd_t * pgd_entry,pud_t * pud)37 pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
38 {
39 	pgd_val(*pgd_entry) = __pa(pud);
40 }
41 
pud_alloc_one(struct mm_struct * mm,unsigned long addr)42 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
43 {
44 	return quicklist_alloc(0, GFP_KERNEL, NULL);
45 }
46 
/* Return a pud page to quicklist 0. */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	quicklist_free(0, NULL, pud);
}
/* TLB-batched variant simply defers to pud_free(). */
#define __pud_free_tlb(tlb, pud, address)	pud_free((tlb)->mm, pud)
52 #endif /* CONFIG_PGTABLE_LEVELS == 4 */
53 
54 static inline void
pud_populate(struct mm_struct * mm,pud_t * pud_entry,pmd_t * pmd)55 pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
56 {
57 	pud_val(*pud_entry) = __pa(pmd);
58 }
59 
pmd_alloc_one(struct mm_struct * mm,unsigned long addr)60 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
61 {
62 	return quicklist_alloc(0, GFP_KERNEL, NULL);
63 }
64 
/* Return a pmd page to quicklist 0. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	quicklist_free(0, NULL, pmd);
}

/* TLB-batched variant simply defers to pmd_free(). */
#define __pmd_free_tlb(tlb, pmd, address)	pmd_free((tlb)->mm, pmd)
71 
72 static inline void
pmd_populate(struct mm_struct * mm,pmd_t * pmd_entry,pgtable_t pte)73 pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
74 {
75 	pmd_val(*pmd_entry) = page_to_phys(pte);
76 }
77 #define pmd_pgtable(pmd) pmd_page(pmd)
78 
79 static inline void
pmd_populate_kernel(struct mm_struct * mm,pmd_t * pmd_entry,pte_t * pte)80 pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
81 {
82 	pmd_val(*pmd_entry) = __pa(pte);
83 }
84 
pte_alloc_one(struct mm_struct * mm,unsigned long addr)85 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr)
86 {
87 	struct page *page;
88 	void *pg;
89 
90 	pg = quicklist_alloc(0, GFP_KERNEL, NULL);
91 	if (!pg)
92 		return NULL;
93 	page = virt_to_page(pg);
94 	if (!pgtable_page_ctor(page)) {
95 		quicklist_free(0, NULL, pg);
96 		return NULL;
97 	}
98 	return page;
99 }
100 
pte_alloc_one_kernel(struct mm_struct * mm,unsigned long addr)101 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
102 					  unsigned long addr)
103 {
104 	return quicklist_alloc(0, GFP_KERNEL, NULL);
105 }
106 
/*
 * Free a user pte page: undo the ctor state first, then return the
 * page (by struct page) to quicklist 0.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	quicklist_free_page(0, NULL, pte);
}
112 
/* Free a kernel pte page (virtual address, no dtor needed). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	quicklist_free(0, NULL, pte);
}
117 
check_pgt_cache(void)118 static inline void check_pgt_cache(void)
119 {
120 	quicklist_trim(0, NULL, 25, 16);
121 }
122 
123 #define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
124 
#endif				/* _ASM_IA64_PGALLOC_H */