
Searched refs:pmd (Results 1 – 25 of 28) sorted by relevance


/mm/
huge_memory.c
492 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
495 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
496 return pmd; in maybe_pmd_mkwrite()
614 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
615 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
642 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
643 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
700 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
704 if (!pmd_none(*pmd)) in set_huge_zero_page()
709 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
[all …]
memory.c
214 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in free_pte_range() argument
217 pgtable_t token = pmd_pgtable(*pmd); in free_pte_range()
218 pmd_clear(pmd); in free_pte_range()
227 pmd_t *pmd; in free_pmd_range() local
232 pmd = pmd_offset(pud, addr); in free_pmd_range()
235 if (pmd_none_or_clear_bad(pmd)) in free_pmd_range()
237 free_pte_range(tlb, pmd, addr); in free_pmd_range()
238 } while (pmd++, addr = next, addr != end); in free_pmd_range()
251 pmd = pmd_offset(pud, start); in free_pmd_range()
253 pmd_free_tlb(tlb, pmd, start); in free_pmd_range()
[all …]
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
50 pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); in map_pte()
148 if (pvmw->pmd && !pvmw->pte) in page_vma_mapped_walk()
176 pvmw->pmd = pmd_offset(pud, pvmw->address); in page_vma_mapped_walk()
182 pmde = READ_ONCE(*pvmw->pmd); in page_vma_mapped_walk()
184 pvmw->ptl = pmd_lock(mm, pvmw->pmd); in page_vma_mapped_walk()
185 if (likely(pmd_trans_huge(*pvmw->pmd))) { in page_vma_mapped_walk()
188 if (pmd_page(*pvmw->pmd) != page) in page_vma_mapped_walk()
191 } else if (!pmd_present(*pvmw->pmd)) { in page_vma_mapped_walk()
195 if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) { in page_vma_mapped_walk()
[all …]
gup.c
174 unsigned long address, pmd_t *pmd, unsigned int flags, in follow_page_pte() argument
183 if (unlikely(pmd_bad(*pmd))) in follow_page_pte()
186 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
203 migration_entry_wait(mm, pmd, address); in follow_page_pte()
313 pmd_t *pmd, pmdval; in follow_pmd_mask() local
318 pmd = pmd_offset(pudp, address); in follow_pmd_mask()
323 pmdval = READ_ONCE(*pmd); in follow_pmd_mask()
327 page = follow_huge_pmd(mm, address, pmd, flags); in follow_pmd_mask()
347 pmd_migration_entry_wait(mm, pmd); in follow_pmd_mask()
348 pmdval = READ_ONCE(*pmd); in follow_pmd_mask()
[all …]
sparse-vmemmap.c
143 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) in vmemmap_pte_populate() argument
145 pte_t *pte = pte_offset_kernel(pmd, addr); in vmemmap_pte_populate()
170 pmd_t *pmd = pmd_offset(pud, addr); in vmemmap_pmd_populate() local
171 if (pmd_none(*pmd)) { in vmemmap_pmd_populate()
175 pmd_populate_kernel(&init_mm, pmd, p); in vmemmap_pmd_populate()
177 return pmd; in vmemmap_pmd_populate()
223 pmd_t *pmd; in vmemmap_populate_basepages() local
236 pmd = vmemmap_pmd_populate(pud, addr, node); in vmemmap_populate_basepages()
237 if (!pmd) in vmemmap_populate_basepages()
239 pte = vmemmap_pte_populate(pmd, addr, node); in vmemmap_populate_basepages()
mprotect.c
38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
53 if (pmd_trans_unstable(pmd)) in change_pte_range()
61 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
168 pmd_t *pmd; in change_pmd_range() local
176 pmd = pmd_offset(pud, addr); in change_pmd_range()
181 if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) in change_pmd_range()
182 && pmd_none_or_clear_bad(pmd)) in change_pmd_range()
193 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { in change_pmd_range()
195 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
197 int nr_ptes = change_huge_pmd(vma, pmd, addr, in change_pmd_range()
[all …]
pgtable-generic.c
39 void pmd_clear_bad(pmd_t *pmd) in pmd_clear_bad() argument
41 pmd_ERROR(*pmd); in pmd_clear_bad()
42 pmd_clear(pmd); in pmd_clear_bad()
127 pmd_t pmd; in pmdp_huge_clear_flush() local
131 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_huge_clear_flush()
133 return pmd; in pmdp_huge_clear_flush()
202 pmd_t pmd; in pmdp_collapse_flush() local
206 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
210 return pmd; in pmdp_collapse_flush()
madvise.c
183 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, in swapin_walk_pmd_entry() argument
190 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in swapin_walk_pmd_entry()
199 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
300 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, in madvise_cold_or_pageout_pte_range() argument
318 if (pmd_trans_huge(*pmd)) { in madvise_cold_or_pageout_pte_range()
323 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_cold_or_pageout_pte_range()
327 orig_pmd = *pmd; in madvise_cold_or_pageout_pte_range()
356 pmdp_invalidate(vma, addr, pmd); in madvise_cold_or_pageout_pte_range()
359 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_cold_or_pageout_pte_range()
360 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_cold_or_pageout_pte_range()
[all …]
khugepaged.c
892 unsigned long address, pmd_t *pmd, in __collapse_huge_page_swapin() argument
901 .pmd = pmd, in __collapse_huge_page_swapin()
910 vmf.pte = pte_offset_map(pmd, address); in __collapse_huge_page_swapin()
928 if (mm_find_pmd(mm, address) != pmd) { in __collapse_huge_page_swapin()
938 vmf.pte = pte_offset_map(pmd, vmf.address); in __collapse_huge_page_swapin()
951 pmd_t *pmd, _pmd; in collapse_huge_page() local
993 pmd = mm_find_pmd(mm, address); in collapse_huge_page()
994 if (!pmd) { in collapse_huge_page()
1006 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { in collapse_huge_page()
1026 if (mm_find_pmd(mm, address) != pmd) in collapse_huge_page()
[all …]
pagewalk.c
7 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in walk_pte_range() argument
14 pte = pte_offset_map(pmd, addr); in walk_pte_range()
32 pmd_t *pmd; in walk_pmd_range() local
37 pmd = pmd_offset(pud, addr); in walk_pmd_range()
41 if (pmd_none(*pmd) || !walk->vma) { in walk_pmd_range()
53 err = ops->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
64 split_huge_pmd(walk->vma, pmd, addr); in walk_pmd_range()
65 if (pmd_trans_unstable(pmd)) in walk_pmd_range()
67 err = walk_pte_range(pmd, addr, next, walk); in walk_pmd_range()
70 } while (pmd++, addr = next, addr != end); in walk_pmd_range()
hmm.c
395 static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) in pmd_to_hmm_pfn_flags() argument
397 if (pmd_protnone(pmd)) in pmd_to_hmm_pfn_flags()
399 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] | in pmd_to_hmm_pfn_flags()
406 unsigned long end, uint64_t *pfns, pmd_t pmd) in hmm_vma_handle_pmd() argument
415 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd); in hmm_vma_handle_pmd()
419 if (pmd_protnone(pmd) || fault || write_fault) in hmm_vma_handle_pmd()
422 pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); in hmm_vma_handle_pmd()
424 if (pmd_devmap(pmd)) { in hmm_vma_handle_pmd()
442 unsigned long end, uint64_t *pfns, pmd_t pmd);
562 pmd_t pmd; in hmm_vma_walk_pmd() local
[all …]
mremap.c
38 pmd_t *pmd; in get_old_pmd() local
52 pmd = pmd_offset(pud, addr); in get_old_pmd()
53 if (pmd_none(*pmd)) in get_old_pmd()
56 return pmd; in get_old_pmd()
65 pmd_t *pmd; in alloc_new_pmd() local
75 pmd = pmd_alloc(mm, pud, addr); in alloc_new_pmd()
76 if (!pmd) in alloc_new_pmd()
79 VM_BUG_ON(pmd_trans_huge(*pmd)); in alloc_new_pmd()
81 return pmd; in alloc_new_pmd()
201 pmd_t pmd; in move_normal_pmd() local
[all …]
mincore.c
122 static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in mincore_pte_range() argument
131 ptl = pmd_trans_huge_lock(pmd, vma); in mincore_pte_range()
138 if (pmd_trans_unstable(pmd)) { in mincore_pte_range()
143 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in mincore_pte_range()
rmap.c
716 pmd_t *pmd = NULL; in mm_find_pmd() local
731 pmd = pmd_offset(pud, address); in mm_find_pmd()
737 pmde = *pmd; in mm_find_pmd()
740 pmd = NULL; in mm_find_pmd()
742 return pmd; in mm_find_pmd()
790 pvmw.pmd)) in page_referenced_one()
924 pmd_t *pmd = pvmw.pmd; in page_mkclean_one() local
927 if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) in page_mkclean_one()
931 entry = pmdp_invalidate(vma, address, pmd); in page_mkclean_one()
934 set_pmd_at(vma->vm_mm, address, pmd, entry); in page_mkclean_one()
vmalloc.c
63 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
67 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
76 pmd_t *pmd; in vunmap_pmd_range() local
79 pmd = pmd_offset(pud, addr); in vunmap_pmd_range()
82 if (pmd_clear_huge(pmd)) in vunmap_pmd_range()
84 if (pmd_none_or_clear_bad(pmd)) in vunmap_pmd_range()
86 vunmap_pte_range(pmd, addr, next); in vunmap_pmd_range()
87 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
137 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, in vmap_pte_range() argument
147 pte = pte_alloc_kernel(pmd, addr); in vmap_pte_range()
[all …]
migrate.c
339 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, in migration_entry_wait() argument
342 spinlock_t *ptl = pte_lockptr(mm, pmd); in migration_entry_wait()
343 pte_t *ptep = pte_offset_map(pmd, address); in migration_entry_wait()
355 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) in pmd_migration_entry_wait() argument
360 ptl = pmd_lock(mm, pmd); in pmd_migration_entry_wait()
361 if (!is_pmd_migration_entry(*pmd)) in pmd_migration_entry_wait()
363 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
1937 bool pmd_trans_migrating(pmd_t pmd) in pmd_trans_migrating() argument
1939 struct page *page = pmd_page(pmd); in pmd_trans_migrating()
2005 pmd_t *pmd, pmd_t entry, in migrate_misplaced_transhuge_page() argument
[all …]
swapfile.c
1853 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, in unuse_pte() argument
1873 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in unuse_pte()
1909 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in unuse_pte_range() argument
1923 pte = pte_offset_map(pmd, addr); in unuse_pte_range()
1942 vmf.pmd = pmd; in unuse_pte_range()
1952 ret = unuse_pte(vma, pmd, addr, entry, page); in unuse_pte_range()
1968 pte = pte_offset_map(pmd, addr); in unuse_pte_range()
1982 pmd_t *pmd; in unuse_pmd_range() local
1986 pmd = pmd_offset(pud, addr); in unuse_pmd_range()
1990 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) in unuse_pmd_range()
[all …]
mempolicy.c
441 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, in queue_pages_pmd() argument
449 if (unlikely(is_pmd_migration_entry(*pmd))) { in queue_pages_pmd()
453 page = pmd_page(*pmd); in queue_pages_pmd()
456 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); in queue_pages_pmd()
490 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, in queue_pages_pte_range() argument
502 ptl = pmd_trans_huge_lock(pmd, vma); in queue_pages_pte_range()
504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); in queue_pages_pte_range()
510 if (pmd_trans_unstable(pmd)) in queue_pages_pte_range()
513 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in queue_pages_pte_range()
memory-failure.c
271 pmd_t *pmd; in dev_pagemap_mapping_shift() local
285 pmd = pmd_offset(pud, address); in dev_pagemap_mapping_shift()
286 if (!pmd_present(*pmd)) in dev_pagemap_mapping_shift()
288 if (pmd_devmap(*pmd)) in dev_pagemap_mapping_shift()
290 pte = pte_offset_map(pmd, address); in dev_pagemap_mapping_shift()
memcontrol.c
5589 unsigned long addr, pmd_t pmd, union mc_target *target) in get_mctgt_type_thp() argument
5594 if (unlikely(is_swap_pmd(pmd))) { in get_mctgt_type_thp()
5596 !is_pmd_migration_entry(pmd)); in get_mctgt_type_thp()
5599 page = pmd_page(pmd); in get_mctgt_type_thp()
5614 unsigned long addr, pmd_t pmd, union mc_target *target) in get_mctgt_type_thp() argument
5620 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, in mem_cgroup_count_precharge_pte_range() argument
5628 ptl = pmd_trans_huge_lock(pmd, vma); in mem_cgroup_count_precharge_pte_range()
5635 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE) in mem_cgroup_count_precharge_pte_range()
5641 if (pmd_trans_unstable(pmd)) in mem_cgroup_count_precharge_pte_range()
5643 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mem_cgroup_count_precharge_pte_range()
[all …]
hugetlb.c
5020 pmd_t *pmd; in huge_pte_offset() local
5036 pmd = pmd_offset(pud, addr); in huge_pte_offset()
5037 if (sz != PMD_SIZE && pmd_none(*pmd)) in huge_pte_offset()
5040 if (pmd_huge(*pmd) || !pmd_present(*pmd)) in huge_pte_offset()
5041 return (pte_t *)pmd; in huge_pte_offset()
5069 pmd_t *pmd, int flags) in follow_huge_pmd() argument
5075 ptl = pmd_lockptr(mm, pmd); in follow_huge_pmd()
5081 if (!pmd_huge(*pmd)) in follow_huge_pmd()
5083 pte = huge_ptep_get((pte_t *)pmd); in follow_huge_pmd()
5085 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); in follow_huge_pmd()
[all …]
page_idle.c
75 if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) in page_idle_clear_pte_refs_one()
ksm.c
1123 pmd_t *pmd; in replace_page() local
1135 pmd = mm_find_pmd(mm, addr); in replace_page()
1136 if (!pmd) in replace_page()
1143 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); in replace_page()
/mm/kasan/
init.c
74 static inline bool kasan_pte_table(pmd_t pmd) in kasan_pte_table() argument
76 return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte)); in kasan_pte_table()
96 static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, in zero_pte_populate() argument
99 pte_t *pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
109 pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
116 pmd_t *pmd = pmd_offset(pud, addr); in zero_pmd_populate() local
123 pmd_populate_kernel(&init_mm, pmd, in zero_pmd_populate()
128 if (pmd_none(*pmd)) { in zero_pmd_populate()
138 pmd_populate_kernel(&init_mm, pmd, p); in zero_pmd_populate()
140 zero_pte_populate(pmd, addr, next); in zero_pmd_populate()
[all …]
common.c
644 pmd_t *pmd; in shadow_mapped() local
663 pmd = pmd_offset(pud, addr); in shadow_mapped()
664 if (pmd_none(*pmd)) in shadow_mapped()
667 if (pmd_bad(*pmd)) in shadow_mapped()
669 pte = pte_offset_kernel(pmd, addr); in shadow_mapped()
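A pattern that recurs throughout these results is the PMD-level page-table walk: locate the entry with pmd_offset(), clamp each step with pmd_addr_end(), and skip empty or corrupt entries with pmd_none_or_clear_bad(), as in free_pmd_range(), vunmap_pmd_range(), and unuse_pmd_range() above. Below is a minimal sketch of that idiom, assuming a kernel context (<linux/mm.h> and the arch page-table helpers); example_pmd_range() is a hypothetical name used only for illustration, not a function from any of the files listed.

    /* Hypothetical walker showing the common pmd_offset() loop. */
    static void example_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
    {
            pmd_t *pmd;
            unsigned long next;

            pmd = pmd_offset(pud, addr);            /* first PMD entry covering addr */
            do {
                    next = pmd_addr_end(addr, end); /* end of this PMD's range, clamped to end */
                    if (pmd_none_or_clear_bad(pmd)) /* skip empty or corrupt entries */
                            continue;
                    /* operate on the PTE page below this PMD, e.g. walk its PTEs */
            } while (pmd++, addr = next, addr != end);
    }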
