Lines Matching full:pmd
16 * allocate the first pmd adjacent to the pgd. This means that we can
17 * subtract a constant offset to get to it. The pmd and pgd sizes are
18 * arranged so that a single pmd covers 4GB (giving a full 64-bit
32 /* Populate first pmd with allocated memory. We mark it
34 * pmd entry may not be cleared. */
39 /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
40 * a signal that this pmd may not be freed */
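Taken together, these fragments come from a pgd_alloc() that carves the permanent pmd out of the same allocation as the pgd, which is what makes the constant-offset trick above work. A minimal sketch of how they plausibly fit together, assuming the usual three-level layout; PGD_ALLOC_ORDER, PTRS_PER_PGD, likely(), and __pgd_val_set() do not appear in the matched lines and are assumptions:

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /* One block holds the first pmd followed by the pgd, so the
         * pgd sits at a constant offset from its permanent pmd. */
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
        pgd_t *actual_pgd = pgd;

        if (likely(pgd != NULL)) {
                memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
                actual_pgd += PTRS_PER_PGD;
                /* Populate the first pmd and mark it ATTACHED so this
                 * pgd entry may not be cleared. */
                __pgd_val_set(*actual_pgd,
                              (PxD_FLAG_PRESENT | PxD_FLAG_VALID |
                               PxD_FLAG_ATTACHED)
                              + (__u32)(__pa((unsigned long)pgd)
                                        >> PxD_VALUE_SHIFT));
                /* Mark the first pmd entry as well, signalling that
                 * this pmd may not be freed. */
                __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
        }
        return actual_pgd;
}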
57 /* Three Level Page Table Support for pmd's */
59 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
62 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
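Line 62 is only the tail of the store. A sketch of the whole body, with the flag word and __pgd_val_set() inferred by analogy with the other matches (an assumption, not quoted source):

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        /* Encode the pmd's physical address, shifted down by
         * PxD_VALUE_SHIFT, alongside the present/valid flags. */
        __pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
                      (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}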
67 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
68 if (pmd)
69 memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
70 return pmd;
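Lines 67-70 are effectively the whole allocator. Restated as one function; the signature, in particular the conventional address parameter, is an assumption:

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        /* A pmd spans PMD_ORDER pages; zero the whole run so every
         * entry starts out invalid. */
        pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);

        if (pmd)
                memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
        return pmd;
}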
73 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
75 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
77 * This is the permanent pmd attached to the pgd;
85 free_pages((unsigned long)pmd, PMD_ORDER);
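Lines 73-85 outline a free routine that refuses to release the attached pmd. A sketch of the likely shape; the early return is an assumption, and some kernel versions also rebalance the mm's pmd accounting in that branch:

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /* This is the permanent pmd attached to the pgd; it
                 * lives inside the pgd allocation and is freed with
                 * it, never on its own. */
                return;
        }
        free_pages((unsigned long)pmd, PMD_ORDER);
}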
90 /* Two Level Page Table Support for pmd's */
93 * allocating and freeing a pmd is trivial: the 1-entry pmd is
99 #define pgd_populate(mm, pmd, pte) BUG()
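With two levels the pmd is folded into the pgd, so the remaining helpers reduce to stubs that generic mm code never actually reaches. Line 99 shows one of them; plausible siblings, whose exact bodies are assumptions:

/* Never called with a folded pmd: pmd_alloc_one traps, and there is
 * nothing for pmd_free to release. The 2 is a poison pointer value. */
#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)                 do { } while (0)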
104 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
108 * the permanent pmd */
109 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
110 __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
116 __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
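Lines 104-116 show both arms of pmd_populate_kernel(): one store that carries the ATTACHED marker forward for the permanent pmd, and one plain store. Assembled into a sketch; the if/else structure and the pte address encoding are inferred by analogy with pgd_populate() above:

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        /* Preserve the marker if this is the beginning of the
         * permanent pmd. */
        if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
                                     PxD_FLAG_VALID |
                                     PxD_FLAG_ATTACHED)
                              + (__u32)(__pa((unsigned long)pte)
                                        >> PxD_VALUE_SHIFT));
        else
                __pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                              + (__u32)(__pa((unsigned long)pte)
                                        >> PxD_VALUE_SHIFT));
}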
120 #define pmd_populate(mm, pmd, pte_page) \
121 pmd_populate_kernel(mm, pmd, page_address(pte_page))
122 #define pmd_pgtable(pmd) pmd_page(pmd)