
Searched refs:pte (Results 1 – 25 of 38) sorted by relevance

/mm/
debug_vm_pgtable.c
97 pte_t pte = pfn_pte(args->fixed_pte_pfn, prot); in pte_basic_tests() local
109 WARN_ON(pte_dirty(pte_wrprotect(pte))); in pte_basic_tests()
111 WARN_ON(!pte_same(pte, pte)); in pte_basic_tests()
112 WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte)))); in pte_basic_tests()
113 WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte)))); in pte_basic_tests()
114 WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte)))); in pte_basic_tests()
115 WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte)))); in pte_basic_tests()
116 WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte)))); in pte_basic_tests()
117 WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte)))); in pte_basic_tests()
118 WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte)))); in pte_basic_tests()
[all …]
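
The debug_vm_pgtable.c hits above exercise the architecture's pte flag helpers in set/clear pairs. A minimal sketch of the same idea follows; the function name and parameters are illustrative, not taken from the file, and it assumes a valid pfn and protection value:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: check that the young/dirty/write helpers invert each other. */
static void pte_helper_roundtrip_check(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/* Setting a bit and then querying it must agree. */
	WARN_ON(!pte_young(pte_mkyoung(pte)));
	WARN_ON(!pte_dirty(pte_mkdirty(pte)));
	WARN_ON(!pte_write(pte_mkwrite(pte)));	/* single-argument form, as in this tree */

	/* The clearing helper applied last must win. */
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}
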
memory.c
572 pte_t pte, struct page *page) in print_bad_pte() argument
608 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
665 pte_t pte) in vm_normal_page() argument
667 unsigned long pfn = pte_pfn(pte); in vm_normal_page()
670 if (likely(!pte_special(pte))) in vm_normal_page()
678 if (pte_devmap(pte)) in vm_normal_page()
681 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
707 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
765 pte_t pte; in restore_exclusive_pte() local
768 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in restore_exclusive_pte()
[all …]
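
The memory.c hits revolve around vm_normal_page(), which resolves a pte to its struct page while screening out special and device mappings. A condensed sketch of that check, assuming CONFIG_ARCH_HAS_PTE_SPECIAL; the helper name is illustrative, and the devmap and zero-page cases of the real function are omitted:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: trust the pfn in a pte only if the entry is present, not special,
 * and the pfn is backed by a struct page. */
static struct page *pte_to_page_or_null(pte_t pte)
{
	unsigned long pfn;

	if (!pte_present(pte) || pte_special(pte))
		return NULL;

	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}
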
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
21 if (!is_swap_pte(*pvmw->pte)) in map_pte()
39 if (is_swap_pte(*pvmw->pte)) { in map_pte()
43 entry = pte_to_swp_entry(*pvmw->pte); in map_pte()
47 } else if (!pte_present(*pvmw->pte)) in map_pte()
93 if (!is_swap_pte(*pvmw->pte)) in check_pte()
95 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
102 } else if (is_swap_pte(*pvmw->pte)) { in check_pte()
106 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
113 if (!pte_present(*pvmw->pte)) in check_pte()
[all …]
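
page_vma_mapped.c repeatedly asks whether a pte is a real mapping or a swap-format entry (swap, migration, device-private). A small sketch of that classification, with an illustrative name and printk-based reporting instead of the real state machine:

#include <linux/mm.h>
#include <linux/swapops.h>

/* Sketch: report whether a pte is present, a migration entry, or a swap entry. */
static void classify_pte(pte_t pte)
{
	if (pte_present(pte)) {
		pr_info("present, pfn %lx\n", pte_pfn(pte));
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (is_migration_entry(entry))
			pr_info("migration entry\n");
		else
			pr_info("swap type %u offset %lx\n",
				swp_type(entry), swp_offset(entry));
	} else {
		pr_info("pte_none\n");
	}
}
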
sparse-vmemmap.c
48 void (*remap_pte)(pte_t *pte, unsigned long addr,
71 pte_t entry, *pte; in split_vmemmap_huge_pmd() local
75 pte = pte_offset_kernel(&__pmd, addr); in split_vmemmap_huge_pmd()
76 set_pte_at(&init_mm, addr, pte, entry); in split_vmemmap_huge_pmd()
92 pte_t *pte = pte_offset_kernel(pmd, addr); in vmemmap_pte_range() local
99 walk->reuse_page = pte_page(*pte); in vmemmap_pte_range()
105 pte++; in vmemmap_pte_range()
109 for (; addr != end; addr += PAGE_SIZE, pte++) { in vmemmap_pte_range()
110 walk->remap_pte(pte, addr, walk); in vmemmap_pte_range()
233 static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, in vmemmap_remap_pte() argument
[all …]
rmap.c
814 if (pvmw.pte) { in page_referenced_one()
816 if (lru_gen_enabled() && pte_young(*pvmw.pte) && in page_referenced_one()
823 pvmw.pte)) { in page_referenced_one()
960 if (pvmw.pte) { in page_mkclean_one()
962 pte_t *pte = pvmw.pte; in page_mkclean_one() local
964 if (!pte_dirty(*pte) && !pte_write(*pte)) in page_mkclean_one()
967 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
968 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
971 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
1549 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
[all …]
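
The rmap.c matches in page_mkclean_one() show the canonical sequence for making a mapped page clean and read-only again. A hedged sketch of just that sequence, assuming the caller holds the pte lock and has the vma, address and mapped pte pointer at hand (the name is illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>

/* Sketch: clear the pte with a TLB flush, then reinstall it write-protected
 * and clean, so the next store faults and re-dirties the page. */
static void wrprotect_and_clean_pte(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
	pte_t entry;

	if (!pte_dirty(*pte) && !pte_write(*pte))
		return;

	flush_cache_page(vma, address, pte_pfn(*pte));
	entry = ptep_clear_flush(vma, address, pte);
	entry = pte_wrprotect(entry);
	entry = pte_mkclean(entry);
	set_pte_at(vma->vm_mm, address, pte, entry);
}
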
hmm.c
216 pte_t pte) in pte_to_hmm_pfn_flags() argument
218 if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte)) in pte_to_hmm_pfn_flags()
220 return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID; in pte_to_hmm_pfn_flags()
231 pte_t pte = *ptep; in hmm_vma_handle_pte() local
234 if (pte_none(pte)) { in hmm_vma_handle_pte()
243 if (!pte_present(pte)) { in hmm_vma_handle_pte()
244 swp_entry_t entry = pte_to_swp_entry(pte); in hmm_vma_handle_pte()
288 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
300 if (!vm_normal_page(walk->vma, addr, pte) && in hmm_vma_handle_pte()
301 !pte_devmap(pte) && in hmm_vma_handle_pte()
[all …]
pagewalk.c
23 static int walk_pte_range_inner(pte_t *pte, unsigned long addr, in walk_pte_range_inner() argument
30 err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range_inner()
36 pte++; in walk_pte_range_inner()
44 pte_t *pte; in walk_pte_range() local
49 pte = pte_offset_map(pmd, addr); in walk_pte_range()
50 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
51 pte_unmap(pte); in walk_pte_range()
53 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in walk_pte_range()
54 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
55 pte_unmap_unlock(pte, ptl); in walk_pte_range()
[all …]
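
pagewalk.c's walk_pte_range() shows the standard map-lock-iterate-unlock pattern over the pte page of one pmd. A minimal sketch of that loop, counting present entries; the function is illustrative and assumes [addr, end) lies within a single pmd:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: walk the ptes covering [addr, end) under one pmd, counting present ones. */
static int count_present_ptes(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long addr, unsigned long end)
{
	pte_t *pte, *orig_pte;
	spinlock_t *ptl;
	int present = 0;

	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE, pte++) {
		if (pte_present(*pte))
			present++;
	}
	pte_unmap_unlock(orig_pte, ptl);

	return present;
}
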
migrate.c
191 pte_t pte; in remove_migration_pte() local
204 if (!pvmw.pte) { in remove_migration_pte()
212 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); in remove_migration_pte()
213 if (pte_swp_soft_dirty(*pvmw.pte)) in remove_migration_pte()
214 pte = pte_mksoft_dirty(pte); in remove_migration_pte()
219 entry = pte_to_swp_entry(*pvmw.pte); in remove_migration_pte()
221 pte = maybe_mkwrite(pte, vma); in remove_migration_pte()
222 else if (pte_swp_uffd_wp(*pvmw.pte)) in remove_migration_pte()
223 pte = pte_mkuffd_wp(pte); in remove_migration_pte()
226 if (pte_write(pte)) in remove_migration_pte()
[all …]
gup.c
465 pte_t *pte, unsigned int flags) in follow_pfn_pte() argument
472 pte_t entry = *pte; in follow_pfn_pte()
478 if (!pte_same(*pte, entry)) { in follow_pfn_pte()
479 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
480 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
492 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) in can_follow_write_pte() argument
494 return pte_write(pte) || in can_follow_write_pte()
495 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); in can_follow_write_pte()
505 pte_t *ptep, pte; in follow_page_pte() local
529 pte = *ptep; in follow_page_pte()
[all …]
madvise.c
206 pte_t pte; in swapin_walk_pmd_entry() local
212 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
215 if (pte_present(pte) || pte_none(pte)) in swapin_walk_pmd_entry()
217 entry = pte_to_swp_entry(pte); in swapin_walk_pmd_entry()
326 pte_t *orig_pte, *pte, ptent; in madvise_cold_or_pageout_pte_range() local
412 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
415 for (; addr < end; pte++, addr += PAGE_SIZE) { in madvise_cold_or_pageout_pte_range()
416 ptent = *pte; in madvise_cold_or_pageout_pte_range()
451 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
452 pte--; in madvise_cold_or_pageout_pte_range()
[all …]
khugepaged.c
570 static void release_pte_pages(pte_t *pte, pte_t *_pte, in release_pte_pages() argument
575 while (--_pte >= pte) { in release_pte_pages()
603 pte_t *pte, in __collapse_huge_page_isolate() argument
611 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; in __collapse_huge_page_isolate()
737 release_pte_pages(pte, _pte, compound_pagelist); in __collapse_huge_page_isolate()
743 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, in __collapse_huge_page_copy() argument
751 for (_pte = pte; _pte < pte + HPAGE_PMD_NR; in __collapse_huge_page_copy()
1026 vmf.pte = pte_offset_map(pmd, address); in __collapse_huge_page_swapin()
1027 vmf.orig_pte = *vmf.pte; in __collapse_huge_page_swapin()
1029 pte_unmap(vmf.pte); in __collapse_huge_page_swapin()
[all …]
mapping_dirty_helpers.c
33 static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end, in wp_pte() argument
37 pte_t ptent = *pte; in wp_pte()
40 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); in wp_pte()
43 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); in wp_pte()
88 static int clean_record_pte(pte_t *pte, unsigned long addr, in clean_record_pte() argument
93 pte_t ptent = *pte; in clean_record_pte()
98 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); in clean_record_pte()
101 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); in clean_record_pte()
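
wp_pte() above uses the ptep_modify_prot_start()/ptep_modify_prot_commit() pair, which lets an architecture batch or special-case live pte rewrites. A sketch of write-protecting one pte under that protocol, assuming the caller holds the pte lock (the name is illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: take the pte out of service, modify the saved value, commit it back. */
static void wrprotect_one_pte(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte)
{
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	}
}
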
mincore.c
24 static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, in mincore_hugetlb() argument
35 present = pte && !huge_pte_none(huge_ptep_get(pte)); in mincore_hugetlb()
122 pte_t pte = *ptep; in mincore_pte_range() local
124 if (pte_none(pte)) in mincore_pte_range()
127 else if (pte_present(pte)) in mincore_pte_range()
130 swp_entry_t entry = pte_to_swp_entry(pte); in mincore_pte_range()
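
mincore_pte_range() classifies each pte three ways: unmapped, present, or swap-format. A simplified sketch of that per-entry decision; the real code additionally looks up genuine swap entries in the swap cache, which is elided here, and the name is illustrative:

#include <linux/mm.h>
#include <linux/swapops.h>

/* Sketch: is the page behind this pte resident, as mincore() would report it? */
static unsigned char pte_in_core(pte_t pte)
{
	if (pte_none(pte))
		return 0;
	if (pte_present(pte))
		return 1;
	/* Non-present, non-none: swap-format. Migration/hwpoison entries
	 * still imply an in-memory page; real swap entries would need a
	 * swap cache lookup (not shown). */
	return non_swap_entry(pte_to_swp_entry(pte)) ? 1 : 0;
}
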
mremap.c
119 static pte_t move_soft_dirty_pte(pte_t pte) in move_soft_dirty_pte() argument
126 if (pte_present(pte)) in move_soft_dirty_pte()
127 pte = pte_mksoft_dirty(pte); in move_soft_dirty_pte()
128 else if (is_swap_pte(pte)) in move_soft_dirty_pte()
129 pte = pte_swp_mksoft_dirty(pte); in move_soft_dirty_pte()
131 return pte; in move_soft_dirty_pte()
140 pte_t *old_pte, *new_pte, pte; in move_ptes() local
183 pte = ptep_get_and_clear(mm, old_addr, old_pte); in move_ptes()
195 if (pte_present(pte)) in move_ptes()
197 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); in move_ptes()
[all …]
mprotect.c
43 pte_t *pte, oldpte; in change_pte_range() local
68 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
78 oldpte = *pte; in change_pte_range()
119 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
143 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
191 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
195 } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
197 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
386 static int prot_none_pte_entry(pte_t *pte, unsigned long addr, in prot_none_pte_entry() argument
389 return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? in prot_none_pte_entry()
[all …]
hugetlb.c
4252 bool is_hugetlb_entry_migration(pte_t pte) in is_hugetlb_entry_migration() argument
4256 if (huge_pte_none(pte) || pte_present(pte)) in is_hugetlb_entry_migration()
4258 swp = pte_to_swp_entry(pte); in is_hugetlb_entry_migration()
4265 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) in is_hugetlb_entry_hwpoisoned() argument
4269 if (huge_pte_none(pte) || pte_present(pte)) in is_hugetlb_entry_hwpoisoned()
4271 swp = pte_to_swp_entry(pte); in is_hugetlb_entry_hwpoisoned()
4456 pte_t pte; in __unmap_hugepage_range() local
4496 pte = huge_ptep_get(ptep); in __unmap_hugepage_range()
4497 if (huge_pte_none(pte)) { in __unmap_hugepage_range()
4506 if (unlikely(!pte_present(pte))) { in __unmap_hugepage_range()
[all …]
swap_state.c
719 pte_t *pte, *orig_pte; in swap_ra_info() local
733 orig_pte = pte = pte_offset_map(vmf->pmd, faddr); in swap_ra_info()
763 pte -= ra_info->offset; in swap_ra_info()
765 ra_info->ptes = pte; in swap_ra_info()
769 *tpte++ = *pte++; in swap_ra_info()
794 pte_t *pte, pentry; in swap_vma_readahead() local
807 for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; in swap_vma_readahead()
808 i++, pte++) { in swap_vma_readahead()
809 pentry = *pte; in swap_vma_readahead()
pgtable-generic.c
94 pte_t pte; in ptep_clear_flush() local
95 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
96 if (pte_accessible(mm, pte)) in ptep_clear_flush()
98 return pte; in ptep_clear_flush()
vmalloc.c
104 pte_t *pte; in vmap_pte_range() local
109 pte = pte_alloc_kernel_track(pmd, addr, mask); in vmap_pte_range()
110 if (!pte) in vmap_pte_range()
113 BUG_ON(!pte_none(*pte)); in vmap_pte_range()
122 set_huge_pte_at(&init_mm, addr, pte, entry); in vmap_pte_range()
127 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); in vmap_pte_range()
129 } while (pte += PFN_DOWN(size), addr += size, addr != end); in vmap_pte_range()
334 pte_t *pte; in vunmap_pte_range() local
336 pte = pte_offset_kernel(pmd, addr); in vunmap_pte_range()
338 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); in vunmap_pte_range()
[all …]
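
vmap_pte_range() allocates the pte page for a kernel pmd and installs pfn_pte() entries under init_mm. A stripped-down sketch that maps a contiguous pfn range at PAGE_SIZE granularity, omitting the huge-page path and the pgtbl_mod_mask bookkeeping of the real function (name illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: map [addr, end) in the kernel page tables to pfn.., one pte at a time. */
static int map_kernel_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn,
				pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	return 0;
}
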
mlock.c
354 pte_t *pte; in __munlock_pagevec_fill() local
362 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
373 pte++; in __munlock_pagevec_fill()
374 if (pte_present(*pte)) in __munlock_pagevec_fill()
375 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
399 pte_unmap_unlock(pte, ptl); in __munlock_pagevec_fill()
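
The mlock.c hit uses get_locked_pte() to map and lock the pte for a single user address in one call, then resolves it with vm_normal_page(). A brief sketch of that lookup pattern (the function name is illustrative):

#include <linux/mm.h>

/* Sketch: grab the locked pte for addr and resolve it to its page, if any. */
static struct page *lookup_mapped_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte;

	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
	if (!pte)
		return NULL;

	if (pte_present(*pte))
		page = vm_normal_page(vma, addr, *pte);

	pte_unmap_unlock(pte, ptl);
	return page;
}
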
/mm/damon/
vaddr.c
373 pte_t *pte; in damon_mkold_pmd_entry() local
393 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in damon_mkold_pmd_entry()
394 if (!pte_present(*pte)) in damon_mkold_pmd_entry()
396 damon_ptep_mkold(pte, walk->vma, addr); in damon_mkold_pmd_entry()
398 pte_unmap_unlock(pte, ptl); in damon_mkold_pmd_entry()
403 static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, in damon_hugetlb_mkold() argument
407 pte_t entry = huge_ptep_get(pte); in damon_hugetlb_mkold()
418 set_huge_pte_at(mm, addr, pte, entry); in damon_hugetlb_mkold()
434 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, in damon_mkold_hugetlb_entry() argument
442 ptl = huge_pte_lock(h, walk->mm, pte); in damon_mkold_hugetlb_entry()
[all …]
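
damon_ptep_mkold(), referenced above, ages a mapping by clearing the pte's accessed bit. A tiny sketch of the core step, assuming the caller already holds the pte lock and the pte maps a page (name illustrative):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: clear the accessed (young) bit of a mapped pte, reporting its old state. */
static bool demo_clear_pte_young(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *pte)
{
	if (!pte_present(*pte))
		return false;

	return ptep_test_and_clear_young(vma, addr, pte);
}
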
prmtv-common.c
36 void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) in damon_ptep_mkold() argument
39 struct page *page = damon_get_page(pte_pfn(*pte)); in damon_ptep_mkold()
44 if (ptep_test_and_clear_young(vma, addr, pte)) in damon_ptep_mkold()
paddr.c
30 if (pvmw.pte) in __damon_pa_mkold()
31 damon_ptep_mkold(pvmw.pte, vma, addr); in __damon_pa_mkold()
106 if (pvmw.pte) { in __damon_pa_young()
107 result->accessed = pte_young(*pvmw.pte) || in __damon_pa_young()
/mm/kasan/
init.c
75 static inline bool kasan_early_shadow_page_entry(pte_t pte) in kasan_early_shadow_page_entry() argument
77 return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page)); in kasan_early_shadow_page_entry()
95 pte_t *pte = pte_offset_kernel(pmd, addr); in zero_pte_populate() local
103 set_pte_at(&init_mm, addr, pte, zero_pte); in zero_pte_populate()
105 pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
284 pte_t *pte; in kasan_free_pte() local
288 pte = pte_start + i; in kasan_free_pte()
289 if (!pte_none(*pte)) in kasan_free_pte()
342 static void kasan_remove_pte_table(pte_t *pte, unsigned long addr, in kasan_remove_pte_table() argument
347 for (; addr < end; addr = next, pte++) { in kasan_remove_pte_table()
[all …]
shadow.c
152 pte_t *pte; in shadow_mapped() local
176 pte = pte_offset_kernel(pmd, addr); in shadow_mapped()
177 return !pte_none(*pte); in shadow_mapped()
261 pte_t pte; in kasan_populate_vmalloc_pte() local
271 pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); in kasan_populate_vmalloc_pte()
275 set_pte_at(&init_mm, addr, ptep, pte); in kasan_populate_vmalloc_pte()
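
shadow_mapped() walks the kernel page tables down to pte level to see whether a shadow address is populated. A condensed sketch of that descent; it does not handle huge pud/pmd mappings the way the real function does, and the name is illustrative:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch: is a kernel virtual address mapped at pte granularity? */
static bool kernel_addr_has_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}
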
