
Searched refs:pte (Results 1 – 25 of 38) sorted by relevance

/mm/
debug_vm_pgtable.c
64 pte_t pte = pfn_pte(pfn, prot); in pte_basic_tests() local
76 WARN_ON(pte_dirty(pte_wrprotect(pte))); in pte_basic_tests()
78 WARN_ON(!pte_same(pte, pte)); in pte_basic_tests()
79 WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte)))); in pte_basic_tests()
80 WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte)))); in pte_basic_tests()
81 WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte)))); in pte_basic_tests()
82 WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte)))); in pte_basic_tests()
83 WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte)))); in pte_basic_tests()
84 WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte)))); in pte_basic_tests()
85 WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte)))); in pte_basic_tests()
[all …]
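
The checks above exercise the generic PTE accessor helpers in pairs. A minimal sketch of the same round-trip idea, assuming kernel context where pfn_pte() and the pte_* helpers are visible (the function name is hypothetical, and pte_mkwrite() is used in its single-argument form exactly as in the hits above):

/* Hypothetical self-check: each setter/clearer pair must round-trip. */
static void pte_roundtrip_check(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	/* Setting a bit after clearing it must leave it set... */
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	/* ...and clearing it after setting it must leave it cleared. */
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
}
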
page_vma_mapped.c
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
21 if (!is_swap_pte(*pvmw->pte)) in map_pte()
39 if (is_swap_pte(*pvmw->pte)) { in map_pte()
43 entry = pte_to_swp_entry(*pvmw->pte); in map_pte()
46 } else if (!pte_present(*pvmw->pte)) in map_pte()
91 if (!is_swap_pte(*pvmw->pte)) in check_pte()
93 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
99 } else if (is_swap_pte(*pvmw->pte)) { in check_pte()
103 entry = pte_to_swp_entry(*pvmw->pte); in check_pte()
109 if (!pte_present(*pvmw->pte)) in check_pte()
[all …]
memory.c
546 pte_t pte, struct page *page) in print_bad_pte() argument
582 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
640 pte_t pte, unsigned long vma_flags) in _vm_normal_page() argument
642 unsigned long pfn = pte_pfn(pte); in _vm_normal_page()
645 if (likely(!pte_special(pte))) in _vm_normal_page()
653 if (pte_devmap(pte)) in _vm_normal_page()
656 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
686 print_bad_pte(vma, addr, pte, NULL); in _vm_normal_page()
752 pte_t pte = *src_pte; in copy_nonpresent_pte() local
754 swp_entry_t entry = pte_to_swp_entry(pte); in copy_nonpresent_pte()
[all …]
gup.c
392 pte_t *pte, unsigned int flags) in follow_pfn_pte() argument
399 pte_t entry = *pte; in follow_pfn_pte()
405 if (!pte_same(*pte, entry)) { in follow_pfn_pte()
406 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
407 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
419 static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) in can_follow_write_pte() argument
421 return pte_write(pte) || in can_follow_write_pte()
422 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); in can_follow_write_pte()
432 pte_t *ptep, pte; in follow_page_pte() local
456 pte = *ptep; in follow_page_pte()
[all …]
hmm.c
222 pte_t pte) in pte_to_hmm_pfn_flags() argument
224 if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte)) in pte_to_hmm_pfn_flags()
226 return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID; in pte_to_hmm_pfn_flags()
237 pte_t pte = *ptep; in hmm_vma_handle_pte() local
240 if (pte_none(pte)) { in hmm_vma_handle_pte()
249 if (!pte_present(pte)) { in hmm_vma_handle_pte()
250 swp_entry_t entry = pte_to_swp_entry(pte); in hmm_vma_handle_pte()
287 cpu_flags = pte_to_hmm_pfn_flags(range, pte); in hmm_vma_handle_pte()
299 if (!vm_normal_page(walk->vma, addr, pte) && in hmm_vma_handle_pte()
300 !pte_devmap(pte) && in hmm_vma_handle_pte()
[all …]
madvise.c
200 pte_t pte; in swapin_walk_pmd_entry() local
206 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
209 if (pte_present(pte) || pte_none(pte)) in swapin_walk_pmd_entry()
211 entry = pte_to_swp_entry(pte); in swapin_walk_pmd_entry()
320 pte_t *orig_pte, *pte, ptent; in madvise_cold_or_pageout_pte_range() local
407 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
410 for (; addr < end; pte++, addr += PAGE_SIZE) { in madvise_cold_or_pageout_pte_range()
411 ptent = *pte; in madvise_cold_or_pageout_pte_range()
450 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
451 pte--; in madvise_cold_or_pageout_pte_range()
[all …]
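
The madvise_cold_or_pageout_pte_range() hits above follow the canonical locked PTE-scan pattern. A condensed sketch of that pattern, assuming the pmd is already known to map a PTE page and a kernel of the same era as the hits (the function name is a placeholder):

static void scan_pte_range(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, unsigned long end)
{
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;

	/* Map the PTE page and take its page-table lock in one step. */
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (pte_none(ptent) || !pte_present(ptent))
			continue;
		/* Act on the present entry, e.g. via vm_normal_page(). */
	}
	/* Unmap and unlock the PTE page mapped above. */
	pte_unmap_unlock(orig_pte, ptl);
}
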
migrate.c
193 pte_t pte; in remove_migration_pte() local
206 if (!pvmw.pte) { in remove_migration_pte()
214 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); in remove_migration_pte()
215 if (pte_swp_soft_dirty(*pvmw.pte)) in remove_migration_pte()
216 pte = pte_mksoft_dirty(pte); in remove_migration_pte()
221 entry = pte_to_swp_entry(*pvmw.pte); in remove_migration_pte()
223 pte = maybe_mkwrite(pte, vma->vm_flags); in remove_migration_pte()
224 else if (pte_swp_uffd_wp(*pvmw.pte)) in remove_migration_pte()
225 pte = pte_mkuffd_wp(pte); in remove_migration_pte()
228 entry = make_device_private_entry(new, pte_write(pte)); in remove_migration_pte()
[all …]
sparse-vmemmap.c
132 void __meminit vmemmap_verify(pte_t *pte, int node, in vmemmap_verify() argument
135 unsigned long pfn = pte_pfn(*pte); in vmemmap_verify()
146 pte_t *pte = pte_offset_kernel(pmd, addr); in vmemmap_pte_populate() local
147 if (pte_none(*pte)) { in vmemmap_pte_populate()
155 set_pte_at(&init_mm, addr, pte, entry); in vmemmap_pte_populate()
157 return pte; in vmemmap_pte_populate()
227 pte_t *pte; in vmemmap_populate_basepages() local
242 pte = vmemmap_pte_populate(pmd, addr, node, altmap); in vmemmap_populate_basepages()
243 if (!pte) in vmemmap_populate_basepages()
245 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE); in vmemmap_populate_basepages()
pagewalk.c
23 static int walk_pte_range_inner(pte_t *pte, unsigned long addr, in walk_pte_range_inner() argument
30 err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk); in walk_pte_range_inner()
36 pte++; in walk_pte_range_inner()
44 pte_t *pte; in walk_pte_range() local
49 pte = pte_offset_map(pmd, addr); in walk_pte_range()
50 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
51 pte_unmap(pte); in walk_pte_range()
53 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in walk_pte_range()
54 err = walk_pte_range_inner(pte, addr, end, walk); in walk_pte_range()
55 pte_unmap_unlock(pte, ptl); in walk_pte_range()
[all …]
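
walk_pte_range_inner() above is what ultimately invokes a caller-supplied ->pte_entry callback. A minimal sketch of driving it from the caller side through walk_page_range(), assuming the caller already holds mmap_read_lock() on the mm (the callback and wrapper names are illustrative):

/* Count present PTEs in [start, end); walk->private carries the counter. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr = walk->private;

	if (pte_present(*pte))
		(*nr)++;
	return 0;
}

static const struct mm_walk_ops count_present_ops = {
	.pte_entry = count_present_pte,
};

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long nr = 0;

	/* Caller must hold mmap_read_lock(mm) across the walk. */
	walk_page_range(mm, start, end, &count_present_ops, &nr);
	return nr;
}
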
khugepaged.c
568 static void release_pte_pages(pte_t *pte, pte_t *_pte, in release_pte_pages() argument
573 while (--_pte >= pte) { in release_pte_pages()
601 pte_t *pte, in __collapse_huge_page_isolate() argument
609 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; in __collapse_huge_page_isolate()
735 release_pte_pages(pte, _pte, compound_pagelist); in __collapse_huge_page_isolate()
741 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, in __collapse_huge_page_copy() argument
749 for (_pte = pte; _pte < pte + HPAGE_PMD_NR; in __collapse_huge_page_copy()
1026 vmf.pte = pte_offset_map(pmd, address); in __collapse_huge_page_swapin()
1027 vmf.orig_pte = *vmf.pte; in __collapse_huge_page_swapin()
1029 pte_unmap(vmf.pte); in __collapse_huge_page_swapin()
[all …]
rmap.c
804 if (pvmw.pte) { in page_referenced_one()
807 pvmw.pte)) { in page_referenced_one()
945 if (pvmw.pte) { in page_mkclean_one()
947 pte_t *pte = pvmw.pte; in page_mkclean_one() local
949 if (!pte_dirty(*pte) && !pte_write(*pte)) in page_mkclean_one()
952 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
953 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
956 set_pte_at(vma->vm_mm, address, pte, entry); in page_mkclean_one()
1510 if (!pvmw.pte && (flags & TTU_MIGRATION)) { in try_to_unmap_one()
1542 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
[all …]
mapping_dirty_helpers.c
32 static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end, in wp_pte() argument
36 pte_t ptent = *pte; in wp_pte()
39 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); in wp_pte()
42 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); in wp_pte()
86 static int clean_record_pte(pte_t *pte, unsigned long addr, in clean_record_pte() argument
91 pte_t ptent = *pte; in clean_record_pte()
96 pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); in clean_record_pte()
99 ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent); in clean_record_pte()
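
The wp_pte() hits show the two-step protocol for changing protection bits on a live PTE. A hedged sketch of write-protecting a single entry with that protocol, assuming the caller holds the PTE lock (the wrapper name is hypothetical; the ptep_modify_prot_* calls are the ones used above):

static void wrprotect_one_pte(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte)
{
	pte_t ptent = *pte;

	if (pte_write(ptent)) {
		/* Start the transaction: atomically fetch the old value. */
		pte_t old_pte = ptep_modify_prot_start(vma, addr, pte);

		ptent = pte_wrprotect(old_pte);
		/* Commit the new value; the arch handles any TLB details. */
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	}
}
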
mincore.c
24 static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, in mincore_hugetlb() argument
35 present = pte && !huge_pte_none(huge_ptep_get(pte)); in mincore_hugetlb()
122 pte_t pte = *ptep; in mincore_pte_range() local
124 if (pte_none(pte)) in mincore_pte_range()
127 else if (pte_present(pte)) in mincore_pte_range()
130 swp_entry_t entry = pte_to_swp_entry(pte); in mincore_pte_range()
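
mincore_pte_range() above distinguishes the three states a user PTE can be in. A compact sketch of that classification, using a hypothetical enum purely for illustration:

/* Hypothetical classification mirroring the mincore_pte_range() checks. */
enum pte_kind { PTE_KIND_NONE, PTE_KIND_PRESENT, PTE_KIND_NONPRESENT };

static enum pte_kind classify_pte(pte_t pte)
{
	if (pte_none(pte))
		return PTE_KIND_NONE;		/* nothing mapped here */
	if (pte_present(pte))
		return PTE_KIND_PRESENT;	/* backed by a page right now */
	/*
	 * Anything else encodes a non-present entry (swap, migration, ...);
	 * pte_to_swp_entry(pte) recovers the swp_entry_t for inspection.
	 */
	return PTE_KIND_NONPRESENT;
}
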
mremap.c
119 static pte_t move_soft_dirty_pte(pte_t pte) in move_soft_dirty_pte() argument
126 if (pte_present(pte)) in move_soft_dirty_pte()
127 pte = pte_mksoft_dirty(pte); in move_soft_dirty_pte()
128 else if (is_swap_pte(pte)) in move_soft_dirty_pte()
129 pte = pte_swp_mksoft_dirty(pte); in move_soft_dirty_pte()
131 return pte; in move_soft_dirty_pte()
140 pte_t *old_pte, *new_pte, pte; in move_ptes() local
183 pte = ptep_get_and_clear(mm, old_addr, old_pte); in move_ptes()
195 if (pte_present(pte)) in move_ptes()
197 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); in move_ptes()
[all …]
mprotect.c
42 pte_t *pte, oldpte; in change_pte_range() local
65 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
75 oldpte = *pte; in change_pte_range()
116 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
140 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
176 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
180 } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
182 pte_unmap_unlock(pte - 1, ptl); in change_pte_range()
371 static int prot_none_pte_entry(pte_t *pte, unsigned long addr, in prot_none_pte_entry() argument
374 return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? in prot_none_pte_entry()
[all …]
hugetlb.c
3789 bool is_hugetlb_entry_migration(pte_t pte) in is_hugetlb_entry_migration() argument
3793 if (huge_pte_none(pte) || pte_present(pte)) in is_hugetlb_entry_migration()
3795 swp = pte_to_swp_entry(pte); in is_hugetlb_entry_migration()
3802 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) in is_hugetlb_entry_hwpoisoned() argument
3806 if (huge_pte_none(pte) || pte_present(pte)) in is_hugetlb_entry_hwpoisoned()
3808 swp = pte_to_swp_entry(pte); in is_hugetlb_entry_hwpoisoned()
3933 pte_t pte; in __unmap_hugepage_range() local
3973 pte = huge_ptep_get(ptep); in __unmap_hugepage_range()
3974 if (huge_pte_none(pte)) { in __unmap_hugepage_range()
3983 if (unlikely(!pte_present(pte))) { in __unmap_hugepage_range()
[all …]
ioremap.c
68 pte_t *pte; in ioremap_pte_range() local
72 pte = pte_alloc_kernel_track(pmd, addr, mask); in ioremap_pte_range()
73 if (!pte) in ioremap_pte_range()
76 BUG_ON(!pte_none(*pte)); in ioremap_pte_range()
77 set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); in ioremap_pte_range()
79 } while (pte++, addr += PAGE_SIZE, addr != end); in ioremap_pte_range()
swap_state.c
763 pte_t *pte, *orig_pte; in swap_ra_info() local
777 orig_pte = pte = pte_offset_map(vmf->pmd, faddr); in swap_ra_info()
778 entry = pte_to_swp_entry(*pte); in swap_ra_info()
812 pte -= ra_info->offset; in swap_ra_info()
814 ra_info->ptes = pte; in swap_ra_info()
818 *tpte++ = *pte++; in swap_ra_info()
843 pte_t *pte, pentry; in swap_vma_readahead() local
854 for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; in swap_vma_readahead()
855 i++, pte++) { in swap_vma_readahead()
856 pentry = *pte; in swap_vma_readahead()
pgtable-generic.c
94 pte_t pte; in ptep_clear_flush() local
95 pte = ptep_get_and_clear(mm, address, ptep); in ptep_clear_flush()
96 if (pte_accessible(mm, pte)) in ptep_clear_flush()
98 return pte; in ptep_clear_flush()
mlock.c
384 pte_t *pte; in __munlock_pagevec_fill() local
392 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
403 pte++; in __munlock_pagevec_fill()
404 if (pte_present(*pte)) in __munlock_pagevec_fill()
405 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
429 pte_unmap_unlock(pte, ptl); in __munlock_pagevec_fill()
/mm/damon/
vaddr.c
373 pte_t *pte; in damon_mkold_pmd_entry() local
393 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in damon_mkold_pmd_entry()
394 if (!pte_present(*pte)) in damon_mkold_pmd_entry()
396 damon_ptep_mkold(pte, walk->mm, addr); in damon_mkold_pmd_entry()
398 pte_unmap_unlock(pte, ptl); in damon_mkold_pmd_entry()
403 static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm, in damon_hugetlb_mkold() argument
407 pte_t entry = huge_ptep_get(pte); in damon_hugetlb_mkold()
418 set_huge_pte_at(mm, addr, pte, entry); in damon_hugetlb_mkold()
434 static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, in damon_mkold_hugetlb_entry() argument
442 ptl = huge_pte_lock(h, walk->mm, pte); in damon_mkold_hugetlb_entry()
[all …]
prmtv-common.c
36 void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) in damon_ptep_mkold() argument
39 struct page *page = damon_get_page(pte_pfn(*pte)); in damon_ptep_mkold()
44 if (pte_young(*pte)) { in damon_ptep_mkold()
46 *pte = pte_mkold(*pte); in damon_ptep_mkold()
paddr.c
30 if (pvmw.pte) in __damon_pa_mkold()
31 damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr); in __damon_pa_mkold()
106 if (pvmw.pte) { in __damon_pa_young()
107 result->accessed = pte_young(*pvmw.pte) || in __damon_pa_young()
/mm/kasan/
init.c
75 static inline bool kasan_early_shadow_page_entry(pte_t pte) in kasan_early_shadow_page_entry() argument
77 return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page)); in kasan_early_shadow_page_entry()
95 pte_t *pte = pte_offset_kernel(pmd, addr); in zero_pte_populate() local
103 set_pte_at(&init_mm, addr, pte, zero_pte); in zero_pte_populate()
105 pte = pte_offset_kernel(pmd, addr); in zero_pte_populate()
284 pte_t *pte; in kasan_free_pte() local
288 pte = pte_start + i; in kasan_free_pte()
289 if (!pte_none(*pte)) in kasan_free_pte()
342 static void kasan_remove_pte_table(pte_t *pte, unsigned long addr, in kasan_remove_pte_table() argument
347 for (; addr < end; addr = next, pte++) { in kasan_remove_pte_table()
[all …]
shadow.c
146 pte_t *pte; in shadow_mapped() local
170 pte = pte_offset_kernel(pmd, addr); in shadow_mapped()
171 return !pte_none(*pte); in shadow_mapped()
255 pte_t pte; in kasan_populate_vmalloc_pte() local
265 pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); in kasan_populate_vmalloc_pte()
269 set_pte_at(&init_mm, addr, ptep, pte); in kasan_populate_vmalloc_pte()
