Searched refs:pud (Results 1 – 18 of 18) sorted by relevance

/mm/kasan/
init.c:62 static inline bool kasan_pmd_table(pud_t pud) in kasan_pmd_table() argument
64 return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd)); in kasan_pmd_table()
67 static inline bool kasan_pmd_table(pud_t pud) in kasan_pmd_table() argument
113 static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, in zero_pmd_populate() argument
116 pmd_t *pmd = pmd_offset(pud, addr); in zero_pmd_populate()
149 pud_t *pud = pud_offset(p4d, addr); in zero_pud_populate() local
157 pud_populate(&init_mm, pud, in zero_pud_populate()
159 pmd = pmd_offset(pud, addr); in zero_pud_populate()
165 if (pud_none(*pud)) { in zero_pud_populate()
169 p = pmd_alloc(&init_mm, pud, addr); in zero_pud_populate()
[all …]
common.c:643 pud_t *pud; in shadow_mapped() local
652 pud = pud_offset(p4d, addr); in shadow_mapped()
653 if (pud_none(*pud)) in shadow_mapped()
661 if (pud_bad(*pud)) in shadow_mapped()
663 pmd = pmd_offset(pud, addr); in shadow_mapped()
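
The shadow_mapped() hits above show the standard read-only descent through the page-table levels: each entry is tested with *_none() (nothing mapped) and *_bad() (corrupt) before the next level is dereferenced. A minimal sketch of that idiom, assuming a kernel with the p4d level (it compiles away on four-level configs); lookup_pmd() is a hypothetical name:

    /* Hypothetical helper: find the pmd covering addr, or NULL. */
    static pmd_t *lookup_pmd(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
            return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d) || p4d_bad(*p4d))
            return NULL;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud) || pud_bad(*pud))
            return NULL;            /* no pmd table hangs off this pud */
        return pmd_offset(pud, addr);
    }
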
/mm/
huge_memory.c:859 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
862 pud = pud_mkwrite(pud); in maybe_pud_mkwrite()
863 return pud; in maybe_pud_mkwrite()
867 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) in insert_pfn_pud() argument
873 ptl = pud_lock(mm, pud); in insert_pfn_pud()
874 if (!pud_none(*pud)) { in insert_pfn_pud()
876 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { in insert_pfn_pud()
877 WARN_ON_ONCE(!is_huge_zero_pud(*pud)); in insert_pfn_pud()
880 entry = pud_mkyoung(*pud); in insert_pfn_pud()
882 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
[all …]
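
insert_pfn_pud() above illustrates the locking rule for huge pud entries: take the pud-level spinlock with pud_lock() before inspecting or rewriting the slot, because a concurrent fault may be installing an entry. A reduced sketch of just that skeleton (the pfn/prot plumbing of the real function is omitted, and set_pud_at() is only available on architectures with transparent huge pud support):

    /* Sketch: install a prebuilt huge pud entry if the slot is empty. */
    static void install_huge_pud(struct mm_struct *mm, unsigned long addr,
                                 pud_t *pud, pud_t entry)
    {
        spinlock_t *ptl = pud_lock(mm, pud);

        if (pud_none(*pud))
            set_pud_at(mm, addr, pud, entry);
        spin_unlock(ptl);
    }
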
sparse-vmemmap.c:168 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) in vmemmap_pmd_populate() argument
170 pmd_t *pmd = pmd_offset(pud, addr); in vmemmap_pmd_populate()
182 pud_t *pud = pud_offset(p4d, addr); in vmemmap_pud_populate() local
183 if (pud_none(*pud)) { in vmemmap_pud_populate()
187 pud_populate(&init_mm, pud, p); in vmemmap_pud_populate()
189 return pud; in vmemmap_pud_populate()
222 pud_t *pud; in vmemmap_populate_basepages() local
233 pud = vmemmap_pud_populate(p4d, addr, node); in vmemmap_populate_basepages()
234 if (!pud) in vmemmap_populate_basepages()
236 pmd = vmemmap_pmd_populate(pud, addr, node); in vmemmap_populate_basepages()
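
vmemmap_pud_populate() above is the canonical allocate-if-empty shape: if pud_none() finds an unused slot, allocate a page for the missing pmd table and hook it in with pud_populate(). Roughly, under the same assumptions as the original (early-boot context on init_mm, so no locking); alloc_table() is a stand-in for whatever allocator the caller uses:

    /* Sketch: make sure a pmd table exists below this p4d entry. */
    pud_t *ensure_pud(p4d_t *p4d, unsigned long addr, int node)
    {
        pud_t *pud = pud_offset(p4d, addr);

        if (pud_none(*pud)) {
            void *p = alloc_table(node);    /* hypothetical allocator */
            if (!p)
                return NULL;
            pud_populate(&init_mm, pud, p); /* pud now points at the new table */
        }
        return pud;
    }
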
pagewalk.c:29 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
37 pmd = pmd_offset(pud, addr); in walk_pmd_range()
78 pud_t *pud; in walk_pud_range() local
83 pud = pud_offset(p4d, addr); in walk_pud_range()
87 if (pud_none(*pud) || !walk->vma) { in walk_pud_range()
96 spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma); in walk_pud_range()
99 err = ops->pud_entry(pud, addr, next, walk); in walk_pud_range()
107 split_huge_pud(walk->vma, pud, addr); in walk_pud_range()
108 if (pud_none(*pud)) in walk_pud_range()
112 err = walk_pmd_range(pud, addr, next, walk); in walk_pud_range()
[all …]
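
walk_pud_range() is the engine behind the generic page-table walker: a huge entry is handed to the caller's pud_entry() callback under pud_trans_huge_lock(), otherwise it is split and the walk recurses into walk_pmd_range(). From the caller's side the API looks roughly like the sketch below (assuming the mm_walk_ops interface of v5.4-era kernels; the callback and counter are invented for illustration):

    #include <linux/pagewalk.h>

    /* Hypothetical callback: count huge pud mappings in a range. */
    static int count_huge_pud(pud_t *pud, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
    {
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
            (*(unsigned long *)walk->private)++;
        return 0;                   /* nonzero return aborts the walk */
    }

    static const struct mm_walk_ops count_ops = {
        .pud_entry = count_huge_pud,
    };

    /* Usage, with mmap_sem held:
     *     unsigned long n = 0;
     *     walk_page_range(mm, start, end, &count_ops, &n);
     */
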
memory.c:223 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in free_pmd_range() argument
232 pmd = pmd_offset(pud, addr); in free_pmd_range()
251 pmd = pmd_offset(pud, start); in free_pmd_range()
252 pud_clear(pud); in free_pmd_range()
261 pud_t *pud; in free_pud_range() local
266 pud = pud_offset(p4d, addr); in free_pud_range()
269 if (pud_none_or_clear_bad(pud)) in free_pud_range()
271 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
272 } while (pud++, addr = next, addr != end); in free_pud_range()
285 pud = pud_offset(p4d, start); in free_pud_range()
[all …]
gup.c:428 pud_t *pud; in follow_pud_mask() local
433 pud = pud_offset(p4dp, address); in follow_pud_mask()
434 if (pud_none(*pud)) in follow_pud_mask()
436 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_pud_mask()
437 page = follow_huge_pud(mm, address, pud, flags); in follow_pud_mask()
442 if (is_hugepd(__hugepd(pud_val(*pud)))) { in follow_pud_mask()
444 __hugepd(pud_val(*pud)), flags, in follow_pud_mask()
450 if (pud_devmap(*pud)) { in follow_pud_mask()
451 ptl = pud_lock(mm, pud); in follow_pud_mask()
452 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); in follow_pud_mask()
[all …]
pgtable-generic.c:33 void pud_clear_bad(pud_t *pud) in pud_clear_bad() argument
35 pud_ERROR(*pud); in pud_clear_bad()
36 pud_clear(pud); in pud_clear_bad()
140 pud_t pud; in pudp_huge_clear_flush() local
144 pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); in pudp_huge_clear_flush()
146 return pud; in pudp_huge_clear_flush()
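
pud_clear_bad() above exists to back pud_none_or_clear_bad(), the helper most of the range walkers in this listing rely on: a corrupt entry is reported once via pud_ERROR() and then cleared, so the walk skips it instead of crashing on it. The generic inline (from asm-generic) is essentially:

    static inline int pud_none_or_clear_bad(pud_t *pud)
    {
        if (pud_none(*pud))
            return 1;               /* empty slot: nothing to walk */
        if (unlikely(pud_bad(*pud))) {
            pud_clear_bad(pud);     /* report via pud_ERROR(), then clear */
            return 1;
        }
        return 0;                   /* sane entry: safe to descend */
    }
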
mremap.c:37 pud_t *pud; in get_old_pmd() local
48 pud = pud_offset(p4d, addr); in get_old_pmd()
49 if (pud_none_or_clear_bad(pud)) in get_old_pmd()
52 pmd = pmd_offset(pud, addr); in get_old_pmd()
64 pud_t *pud; in alloc_new_pmd() local
71 pud = pud_alloc(mm, p4d, addr); in alloc_new_pmd()
72 if (!pud) in alloc_new_pmd()
75 pmd = pmd_alloc(mm, pud, addr); in alloc_new_pmd()
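
get_old_pmd() and alloc_new_pmd() above are the read and write sides of the same descent: the lookup bails out on missing levels, while pud_alloc()/pmd_alloc() create the intermediate tables on demand and fail only on allocation failure. A condensed sketch of the allocating side (mirroring mm_alloc_pmd() in userfaultfd.c further down):

    static pmd_t *alloc_pmd(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;

        if (!p4d)
            return NULL;
        pud = pud_alloc(mm, p4d, addr);     /* allocates the pud table if absent */
        if (!pud)
            return NULL;
        return pmd_alloc(mm, pud, addr);    /* NULL only on -ENOMEM */
    }
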
hmm.c:647 static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud) in pud_to_hmm_pfn_flags() argument
649 if (!pud_present(pud)) in pud_to_hmm_pfn_flags()
651 return pud_write(pud) ? range->flags[HMM_PFN_VALID] | in pud_to_hmm_pfn_flags()
663 pud_t pud; in hmm_vma_walk_pud() local
667 pud = READ_ONCE(*pudp); in hmm_vma_walk_pud()
668 if (pud_none(pud)) in hmm_vma_walk_pud()
671 if (pud_huge(pud) && pud_devmap(pud)) { in hmm_vma_walk_pud()
676 if (!pud_present(pud)) in hmm_vma_walk_pud()
683 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
690 pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in hmm_vma_walk_pud()
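
hmm_vma_walk_pud() reads the entry once with READ_ONCE() so that every later check (pud_none, pud_huge, pud_devmap, pud_present) tests the same snapshot rather than racing against a concurrent split; the pfn for an address under a huge pud is then the entry's base pfn plus the page offset inside the PUD-sized region. A worked fragment of that arithmetic (hypothetical helper, no locking shown):

    /* Sketch: pfn backing addr inside a present huge pud. */
    static unsigned long huge_pud_pfn(pud_t *pudp, unsigned long addr)
    {
        pud_t pud = READ_ONCE(*pudp);   /* one snapshot, checked consistently */

        if (!pud_present(pud))
            return 0;
        /*
         * addr & ~PUD_MASK is the byte offset into the PUD-sized region
         * (1 GiB on x86-64); shifting by PAGE_SHIFT converts it to pages.
         */
        return pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
    }
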
mprotect.c:165 pud_t *pud, unsigned long addr, unsigned long end, in change_pmd_range() argument
176 pmd = pmd_offset(pud, addr); in change_pmd_range()
231 pud_t *pud; in change_pud_range() local
235 pud = pud_offset(p4d, addr); in change_pud_range()
238 if (pud_none_or_clear_bad(pud)) in change_pud_range()
240 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
242 } while (pud++, addr = next, addr != end); in change_pud_range()
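
change_pud_range() above is the textbook pud-level iteration, repeated almost verbatim by free_pud_range(), vunmap_pud_range() and unuse_pud_range() elsewhere in this listing: pud_addr_end() clips next to the nearer of the next PUD boundary and end, pud_none_or_clear_bad() skips empty or corrupt slots, and the entry pointer and address advance together. The bare skeleton, with the pmd-level work abstracted into a hypothetical do_pmd_range():

    pud_t *pud = pud_offset(p4d, addr);
    unsigned long next;

    do {
        next = pud_addr_end(addr, end);     /* min(next PUD boundary, end) */
        if (pud_none_or_clear_bad(pud))
            continue;                       /* nothing mapped in this slot */
        do_pmd_range(pud, addr, next);      /* caller-specific pmd-level work */
    } while (pud++, addr = next, addr != end);
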
page_vma_mapped.c:144 pud_t *pud; in page_vma_mapped_walk() local
173 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
174 if (!pud_present(*pud)) in page_vma_mapped_walk()
176 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
vmalloc.c:74 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
79 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
92 pud_t *pud; in vunmap_pud_range() local
95 pud = pud_offset(p4d, addr); in vunmap_pud_range()
98 if (pud_clear_huge(pud)) in vunmap_pud_range()
100 if (pud_none_or_clear_bad(pud)) in vunmap_pud_range()
102 vunmap_pmd_range(pud, addr, next); in vunmap_pud_range()
103 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
163 static int vmap_pmd_range(pud_t *pud, unsigned long addr, in vmap_pmd_range() argument
169 pmd = pmd_alloc(&init_mm, pud, addr); in vmap_pmd_range()
[all …]
hugetlb.c:4879 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) in huge_pmd_share() argument
4892 return (pte_t *)pmd_alloc(mm, pud, addr); in huge_pmd_share()
4914 if (pud_none(*pud)) { in huge_pmd_share()
4915 pud_populate(mm, pud, in huge_pmd_share()
4923 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pmd_share()
4944 pud_t *pud = pud_offset(p4d, *addr); in huge_pmd_unshare() local
4950 pud_clear(pud); in huge_pmd_unshare()
4958 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) in huge_pmd_share() argument
4981 pud_t *pud; in huge_pte_alloc() local
4988 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
[all …]
userfaultfd.c:151 pud_t *pud; in mm_alloc_pmd() local
157 pud = pud_alloc(mm, p4d, address); in mm_alloc_pmd()
158 if (!pud) in mm_alloc_pmd()
165 return pmd_alloc(mm, pud, address); in mm_alloc_pmd()
memory-failure.c:270 pud_t *pud; in dev_pagemap_mapping_shift() local
280 pud = pud_offset(p4d, address); in dev_pagemap_mapping_shift()
281 if (!pud_present(*pud)) in dev_pagemap_mapping_shift()
283 if (pud_devmap(*pud)) in dev_pagemap_mapping_shift()
285 pmd = pmd_offset(pud, address); in dev_pagemap_mapping_shift()
rmap.c:715 pud_t *pud; in mm_find_pmd() local
727 pud = pud_offset(p4d, address); in mm_find_pmd()
728 if (!pud_present(*pud)) in mm_find_pmd()
731 pmd = pmd_offset(pud, address); in mm_find_pmd()
swapfile.c:1977 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, in unuse_pmd_range() argument
1986 pmd = pmd_offset(pud, addr); in unuse_pmd_range()
2005 pud_t *pud; in unuse_pud_range() local
2009 pud = pud_offset(p4d, addr); in unuse_pud_range()
2012 if (pud_none_or_clear_bad(pud)) in unuse_pud_range()
2014 ret = unuse_pmd_range(vma, pud, addr, next, type, in unuse_pud_range()
2018 } while (pud++, addr = next, addr != end); in unuse_pud_range()