Searched refs:pud (Results 1 – 23 of 23) sorted by relevance
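
For orientation: pud_t is the "page upper directory" entry, the level between p4d and pmd in the kernel's five-level page tables; almost every hit below is either a level-by-level descent toward a pmd/pte or a loop over a PUD-sized range. A minimal sketch of the read-only descent these results share (compare mm_find_pmd() in rmap.c below); the helper name is hypothetical, the accessors are the real kernel ones, and a ~v5.9 tree is assumed:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical helper: descend from @mm to the pmd covering @addr,
 * returning NULL if any intermediate level is absent or corrupt. */
static pmd_t *find_pmd_sketch(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* top level */
        p4d_t *p4d;
        pud_t *pud;

        if (pgd_none_or_clear_bad(pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);            /* folded on 4-level arches */
        if (p4d_none_or_clear_bad(p4d))
                return NULL;
        pud = pud_offset(p4d, addr);            /* the level searched here */
        if (pud_none_or_clear_bad(pud))
                return NULL;
        return pmd_offset(pud, addr);
}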

/mm/kasan/
init.c
57 static inline bool kasan_pmd_table(pud_t pud) in kasan_pmd_table() argument
59 return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd)); in kasan_pmd_table()
62 static inline bool kasan_pmd_table(pud_t pud) in kasan_pmd_table() argument
109 static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, in zero_pmd_populate() argument
112 pmd_t *pmd = pmd_offset(pud, addr); in zero_pmd_populate()
145 pud_t *pud = pud_offset(p4d, addr); in zero_pud_populate() local
153 pud_populate(&init_mm, pud, in zero_pud_populate()
155 pmd = pmd_offset(pud, addr); in zero_pud_populate()
161 if (pud_none(*pud)) { in zero_pud_populate()
165 p = pmd_alloc(&init_mm, pud, addr); in zero_pud_populate()
[all …]
shadow.c
144 pud_t *pud; in shadow_mapped() local
153 pud = pud_offset(p4d, addr); in shadow_mapped()
154 if (pud_none(*pud)) in shadow_mapped()
162 if (pud_bad(*pud)) in shadow_mapped()
164 pmd = pmd_offset(pud, addr); in shadow_mapped()
/mm/
debug_vm_pgtable.c
297 pud_t pud; in pud_basic_tests() local
303 pud = pfn_pud(pfn, prot); in pud_basic_tests()
312 WARN_ON(pud_dirty(pud_wrprotect(pud))); in pud_basic_tests()
314 WARN_ON(!pud_same(pud, pud)); in pud_basic_tests()
315 WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud)))); in pud_basic_tests()
316 WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud)))); in pud_basic_tests()
317 WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud)))); in pud_basic_tests()
318 WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud)))); in pud_basic_tests()
319 WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud)))); in pud_basic_tests()
320 WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud)))); in pud_basic_tests()
[all …]
ioremap.c
106 static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, in ioremap_pmd_range() argument
113 pmd = pmd_alloc_track(&init_mm, pud, addr, mask); in ioremap_pmd_range()
130 static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr, in ioremap_try_huge_pud() argument
146 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) in ioremap_try_huge_pud()
149 return pud_set_huge(pud, phys_addr, prot); in ioremap_try_huge_pud()
156 pud_t *pud; in ioremap_pud_range() local
159 pud = pud_alloc_track(&init_mm, p4d, addr, mask); in ioremap_pud_range()
160 if (!pud) in ioremap_pud_range()
165 if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) { in ioremap_pud_range()
170 if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask)) in ioremap_pud_range()
[all …]
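
The huge-mapping shortcut above elides its size and alignment checks. A hedged sketch of what a helper like ioremap_try_huge_pud() must verify before installing a 1GiB leaf entry; the helper name here and the exact order of checks are assumptions, and pud_set_huge() exists only on architectures with CONFIG_HAVE_ARCH_HUGE_VMAP:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Assumed sketch: attempt a leaf pud only when the virtual range and
 * the physical address both span and align to a full PUD_SIZE block;
 * pud_set_huge() returns nonzero on success and may still refuse. */
static int try_huge_pud_sketch(pud_t *pud, unsigned long addr,
                               unsigned long end, phys_addr_t phys_addr,
                               pgprot_t prot)
{
        if ((end - addr) != PUD_SIZE)
                return 0;
        if (!IS_ALIGNED(addr, PUD_SIZE) || !IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;
        return pud_set_huge(pud, phys_addr, prot);
}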
huge_memory.c
867 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
870 pud = pud_mkwrite(pud); in maybe_pud_mkwrite()
871 return pud; in maybe_pud_mkwrite()
875 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write) in insert_pfn_pud() argument
881 ptl = pud_lock(mm, pud); in insert_pfn_pud()
882 if (!pud_none(*pud)) { in insert_pfn_pud()
884 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) { in insert_pfn_pud()
885 WARN_ON_ONCE(!is_huge_zero_pud(*pud)); in insert_pfn_pud()
888 entry = pud_mkyoung(*pud); in insert_pfn_pud()
890 if (pudp_set_access_flags(vma, addr, pud, entry, 1)) in insert_pfn_pud()
[all …]
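
insert_pfn_pud() above also shows the locking rule for huge pud updates: take the pud page-table spinlock with pud_lock(), re-check the entry under the lock, and only then modify it. A minimal sketch of that shape, with set_pud_locked_sketch() as a hypothetical name:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Assumed sketch: install a pud entry only under its page-table lock,
 * re-checking pud_none() once the lock is held so a racing thread's
 * entry is not overwritten. */
static void set_pud_locked_sketch(struct mm_struct *mm, pud_t *pudp, pud_t entry)
{
        spinlock_t *ptl = pud_lock(mm, pudp);

        if (pud_none(*pudp))
                set_pud(pudp, entry);
        spin_unlock(ptl);
}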
sparse-vmemmap.c
171 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) in vmemmap_pmd_populate() argument
173 pmd_t *pmd = pmd_offset(pud, addr); in vmemmap_pmd_populate()
185 pud_t *pud = pud_offset(p4d, addr); in vmemmap_pud_populate() local
186 if (pud_none(*pud)) { in vmemmap_pud_populate()
190 pud_populate(&init_mm, pud, p); in vmemmap_pud_populate()
192 return pud; in vmemmap_pud_populate()
225 pud_t *pud; in vmemmap_populate_basepages() local
236 pud = vmemmap_pud_populate(p4d, addr, node); in vmemmap_populate_basepages()
237 if (!pud) in vmemmap_populate_basepages()
239 pmd = vmemmap_pmd_populate(pud, addr, node); in vmemmap_populate_basepages()
pagewalk.c
61 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
70 pmd = pmd_offset(pud, addr); in walk_pmd_range()
122 pud_t *pud; in walk_pud_range() local
128 pud = pud_offset(p4d, addr); in walk_pud_range()
132 if (pud_none(*pud)) { in walk_pud_range()
143 err = ops->pud_entry(pud, addr, next, walk); in walk_pud_range()
150 if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) || in walk_pud_range()
156 split_huge_pud(walk->vma, pud, addr); in walk_pud_range()
157 if (pud_none(*pud)) in walk_pud_range()
160 err = walk_pmd_range(pud, addr, next, walk); in walk_pud_range()
[all …]
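
Nearly every *_pud_range() in these results (walk_pud_range() above, and the free/change/vunmap/unuse variants elsewhere on this page) shares one iteration idiom: call pud_offset() once, then advance in steps clamped to PUD boundaries by pud_addr_end(). A minimal sketch, with do_pmd_level() as a hypothetical stand-in for the per-range work:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Hypothetical per-range callback standing in for walk_pmd_range(),
 * vunmap_pmd_range(), change_pmd_range(), and friends. */
static int do_pmd_level(pud_t *pud, unsigned long addr, unsigned long end);

static int walk_pud_sketch(p4d_t *p4d, unsigned long addr, unsigned long end)
{
        pud_t *pud = pud_offset(p4d, addr);
        unsigned long next;
        int err = 0;

        do {
                next = pud_addr_end(addr, end); /* clamp to a PUD boundary */
                if (pud_none_or_clear_bad(pud))
                        continue;               /* hole: skip this entry */
                err = do_pmd_level(pud, addr, next);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}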
mremap.c
37 pud_t *pud; in get_old_pud() local
47 pud = pud_offset(p4d, addr); in get_old_pud()
48 if (pud_none_or_clear_bad(pud)) in get_old_pud()
51 return pud; in get_old_pud()
56 pud_t *pud; in get_old_pmd() local
59 pud = get_old_pud(mm, addr); in get_old_pmd()
60 if (!pud) in get_old_pmd()
63 pmd = pmd_offset(pud, addr); in get_old_pmd()
87 pud_t *pud; in alloc_new_pmd() local
90 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
[all …]
hmm.c
400 pud_t pud) in pud_to_hmm_pfn_flags() argument
402 if (!pud_present(pud)) in pud_to_hmm_pfn_flags()
404 return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : in pud_to_hmm_pfn_flags()
415 pud_t pud; in hmm_vma_walk_pud() local
425 pud = READ_ONCE(*pudp); in hmm_vma_walk_pud()
426 if (pud_none(pud)) { in hmm_vma_walk_pud()
431 if (pud_huge(pud) && pud_devmap(pud)) { in hmm_vma_walk_pud()
437 if (!pud_present(pud)) { in hmm_vma_walk_pud()
446 cpu_flags = pud_to_hmm_pfn_flags(range, pud); in hmm_vma_walk_pud()
454 pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); in hmm_vma_walk_pud()
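
The arithmetic in the last hit resolves a pfn inside a huge (leaf) pud: pud_pfn() yields the first pfn of the mapped block (available where huge puds are supported), and (addr & ~PUD_MASK) >> PAGE_SHIFT is the index of addr's base page within that PUD_SIZE block. As a sketch with an assumed helper name:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Assumed helper: pfn backing @addr inside a leaf pud = the block's
 * base pfn plus the page index of @addr within the block. */
static unsigned long leaf_pud_pfn(pud_t pud, unsigned long addr)
{
        return pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
}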
pgtable-generic.c
36 void pud_clear_bad(pud_t *pud) in pud_clear_bad() argument
38 pud_ERROR(*pud); in pud_clear_bad()
39 pud_clear(pud); in pud_clear_bad()
149 pud_t pud; in pudp_huge_clear_flush() local
153 pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); in pudp_huge_clear_flush()
155 return pud; in pudp_huge_clear_flush()
memory.c
264 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in free_pmd_range() argument
273 pmd = pmd_offset(pud, addr); in free_pmd_range()
292 pmd = pmd_offset(pud, start); in free_pmd_range()
293 pud_clear(pud); in free_pmd_range()
302 pud_t *pud; in free_pud_range() local
307 pud = pud_offset(p4d, addr); in free_pud_range()
310 if (pud_none_or_clear_bad(pud)) in free_pud_range()
312 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
313 } while (pud++, addr = next, addr != end); in free_pud_range()
326 pud = pud_offset(p4d, start); in free_pud_range()
[all …]
pgalloc-track.h
32 static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, in pmd_alloc_track() argument
36 if (unlikely(pud_none(*pud))) { in pmd_alloc_track()
37 if (__pmd_alloc(mm, pud, address)) in pmd_alloc_track()
42 return pmd_offset(pud, address); in pmd_alloc_track()
gup.c
707 pud_t *pud; in follow_pud_mask() local
712 pud = pud_offset(p4dp, address); in follow_pud_mask()
713 if (pud_none(*pud)) in follow_pud_mask()
715 if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) { in follow_pud_mask()
716 page = follow_huge_pud(mm, address, pud, flags); in follow_pud_mask()
721 if (is_hugepd(__hugepd(pud_val(*pud)))) { in follow_pud_mask()
723 __hugepd(pud_val(*pud)), flags, in follow_pud_mask()
729 if (pud_devmap(*pud)) { in follow_pud_mask()
730 ptl = pud_lock(mm, pud); in follow_pud_mask()
731 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); in follow_pud_mask()
[all …]
mprotect.c
213 pud_t *pud, unsigned long addr, unsigned long end, in change_pmd_range() argument
224 pmd = pmd_offset(pud, addr); in change_pmd_range()
288 pud_t *pud; in change_pud_range() local
292 pud = pud_offset(p4d, addr); in change_pud_range()
295 if (pud_none_or_clear_bad(pud)) in change_pud_range()
297 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
299 } while (pud++, addr = next, addr != end); in change_pud_range()
vmalloc.c
86 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in vunmap_pmd_range() argument
93 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
114 pud_t *pud; in vunmap_pud_range() local
118 pud = pud_offset(p4d, addr); in vunmap_pud_range()
122 cleared = pud_clear_huge(pud); in vunmap_pud_range()
123 if (cleared || pud_bad(*pud)) in vunmap_pud_range()
128 if (pud_none_or_clear_bad(pud)) in vunmap_pud_range()
130 vunmap_pmd_range(pud, addr, next, mask); in vunmap_pud_range()
131 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
221 static int vmap_pmd_range(pud_t *pud, unsigned long addr, in vmap_pmd_range() argument
[all …]
page_vma_mapped.c
156 pud_t *pud; in page_vma_mapped_walk() local
203 pud = pud_offset(p4d, pvmw->address); in page_vma_mapped_walk()
204 if (!pud_present(*pud)) { in page_vma_mapped_walk()
209 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
userfaultfd.c
257 pud_t *pud; in mm_alloc_pmd() local
263 pud = pud_alloc(mm, p4d, address); in mm_alloc_pmd()
264 if (!pud) in mm_alloc_pmd()
271 return pmd_alloc(mm, pud, address); in mm_alloc_pmd()
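
mm_alloc_pmd() above is the allocating counterpart of the read-only descent: each *_alloc() either returns the existing next-level table or populates a fresh one, so the chain fails only on allocation failure. A hedged sketch of the full chain; the helper name is assumed, while p4d_alloc()/pud_alloc()/pmd_alloc() are the real kernel helpers:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Assumed sketch: allocate-on-demand descent to a pmd; each *_alloc()
 * populates its level only if the entry is currently none. */
static pmd_t *alloc_pmd_sketch(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        return pmd_alloc(mm, pud, addr);
}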
ptdump.c
70 static int ptdump_pud_entry(pud_t *pud, unsigned long addr, in ptdump_pud_entry() argument
74 pud_t val = READ_ONCE(*pud); in ptdump_pud_entry()
hugetlb.c
5447 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
5478 if (pud_none(*pud)) { in huge_pmd_share()
5479 pud_populate(mm, pud, in huge_pmd_share()
5487 pte = (pte_t *)pmd_alloc(mm, pud, addr); in huge_pmd_share()
5508 pud_t *pud = pud_offset(p4d, *addr); in huge_pmd_unshare() local
5515 pud_clear(pud); in huge_pmd_unshare()
5531 unsigned long addr, pud_t *pud) in huge_pmd_share() argument
5559 pud_t *pud; in huge_pte_alloc() local
5566 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
5567 if (pud) { in huge_pte_alloc()
[all …]
mapping_dirty_helpers.c
153 static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end, in wp_clean_pud_entry() argument
156 pud_t pudval = READ_ONCE(*pud); in wp_clean_pud_entry()
memory-failure.c
298 pud_t *pud; in dev_pagemap_mapping_shift() local
308 pud = pud_offset(p4d, address); in dev_pagemap_mapping_shift()
309 if (!pud_present(*pud)) in dev_pagemap_mapping_shift()
311 if (pud_devmap(*pud)) in dev_pagemap_mapping_shift()
313 pmd = pmd_offset(pud, address); in dev_pagemap_mapping_shift()
rmap.c
745 pud_t *pud; in mm_find_pmd() local
757 pud = pud_offset(p4d, address); in mm_find_pmd()
758 if (!pud_present(*pud)) in mm_find_pmd()
761 pmd = pmd_offset(pud, address); in mm_find_pmd()
swapfile.c
2052 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, in unuse_pmd_range() argument
2061 pmd = pmd_offset(pud, addr); in unuse_pmd_range()
2080 pud_t *pud; in unuse_pud_range() local
2084 pud = pud_offset(p4d, addr); in unuse_pud_range()
2087 if (pud_none_or_clear_bad(pud)) in unuse_pud_range()
2089 ret = unuse_pmd_range(vma, pud, addr, next, type, in unuse_pud_range()
2093 } while (pud++, addr = next, addr != end); in unuse_pud_range()