/kernel/linux/linux-5.10/arch/arm64/mm/ |
D | hugetlbpage.c |
    162  pte_t orig_pte = huge_ptep_get(ptep);  in get_clear_flush() local
    163  bool valid = pte_valid(orig_pte);  in get_clear_flush()
    175  orig_pte = pte_mkdirty(orig_pte);  in get_clear_flush()
    178  orig_pte = pte_mkyoung(orig_pte);  in get_clear_flush()
    185  return orig_pte;  in get_clear_flush()
    376  pte_t orig_pte = huge_ptep_get(ptep);  in huge_ptep_get_and_clear() local
    378  if (!pte_cont(orig_pte))  in huge_ptep_get_and_clear()
    403  pte_t orig_pte = huge_ptep_get(ptep + i);  in __cont_access_flags_changed() local
    405  if (pte_dirty(pte) != pte_dirty(orig_pte))  in __cont_access_flags_changed()
    408  if (pte_young(pte) != pte_young(orig_pte))  in __cont_access_flags_changed()
    [all …]
|
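For context, the get_clear_flush() lines above follow a common arm64 hugetlb pattern: the head PTE of a contiguous range is snapshotted as orig_pte, every entry is cleared, and any dirty/young bits seen along the way are folded back into the returned value. Below is a minimal sketch of that idea, assuming kernel-internal (linux-5.10 arm64) context; clear_cont_range and its parameters are illustrative names, not the real helper, and the TLB flush the real code performs is omitted.

    static pte_t clear_cont_range(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, unsigned long pgsize,
                                  unsigned long ncontig)
    {
            /* Snapshot the head PTE; this is what the caller ultimately sees. */
            pte_t orig_pte = huge_ptep_get(ptep);
            unsigned long i;

            for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
                    pte_t pte = ptep_get_and_clear(mm, addr, ptep);

                    /* Fold per-entry accessed/dirty state into orig_pte. */
                    if (pte_dirty(pte))
                            orig_pte = pte_mkdirty(orig_pte);
                    if (pte_young(pte))
                            orig_pte = pte_mkyoung(orig_pte);
            }
            return orig_pte;
    }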
/kernel/linux/linux-5.10/arch/powerpc/kvm/ |
D | book3s_32_mmu_host.c |
    130  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() argument
    138  u32 eaddr = orig_pte->eaddr;  in kvmppc_mmu_map_page()
    148  hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
    151  orig_pte->raddr);  in kvmppc_mmu_map_page()
    158  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    198  if (orig_pte->may_write && writable) {  in kvmppc_mmu_map_page()
    200  mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);  in kvmppc_mmu_map_page()
    205  if (orig_pte->may_execute)  in kvmppc_mmu_map_page()
    241  orig_pte->may_write ? 'w' : '-',  in kvmppc_mmu_map_page()
    242  orig_pte->may_execute ? 'x' : '-',  in kvmppc_mmu_map_page()
    [all …]
|
D | book3s_64_mmu_host.c |
    70   int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() argument
    88   unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;  in kvmppc_mmu_map_page()
    96   pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
    99   orig_pte->raddr);  in kvmppc_mmu_map_page()
    106  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    109  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    115  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    121  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);  in kvmppc_mmu_map_page()
    124  if (!orig_pte->may_write || !writable)  in kvmppc_mmu_map_page()
    131  if (!orig_pte->may_execute)  in kvmppc_mmu_map_page()
    [all …]
|
D | trace_pr.h |
    33   struct kvmppc_pte *orig_pte),
    34   TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
    49   __entry->eaddr = orig_pte->eaddr;
    52   __entry->vpage = orig_pte->vpage;
|
D | book3s_hv_rm_mmu.c |
    495  u64 pte, orig_pte, pte_r;  in kvmppc_do_h_remove() local
    504  pte = orig_pte = be64_to_cpu(hpte[0]);  in kvmppc_do_h_remove()
    513  __unlock_hpte(hpte, orig_pte);  in kvmppc_do_h_remove()
|
/kernel/linux/linux-5.10/mm/ |
D | madvise.c |
    194  pte_t *orig_pte;  in swapin_walk_pmd_entry() local
    207  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
    208  pte = *(orig_pte + ((index - start) / PAGE_SIZE));  in swapin_walk_pmd_entry()
    209  pte_unmap_unlock(orig_pte, ptl);  in swapin_walk_pmd_entry()
    321  pte_t *orig_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() local
    400  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in madvise_cold_or_pageout_pte_range()
    428  pte_unmap_unlock(orig_pte, ptl);  in madvise_cold_or_pageout_pte_range()
    477  pte_unmap_unlock(orig_pte, ptl);  in madvise_cold_or_pageout_pte_range()
    582  pte_t *orig_pte, *pte, ptent;  in madvise_free_pte_range() local
    596  orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in madvise_free_pte_range()
    [all …]
|
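In the madvise walkers above, orig_pte is pure bookkeeping: it remembers the PTE pointer returned by pte_offset_map_lock() so the range can be unmapped and unlocked from its start after the pte cursor has advanced. A minimal sketch of that pattern, assuming kernel-internal context; walk_one_pmd and its arguments are hypothetical names for illustration only.

    static void walk_one_pmd(struct mm_struct *mm, pmd_t *pmd,
                             unsigned long addr, unsigned long end)
    {
            pte_t *orig_pte, *pte;
            spinlock_t *ptl;

            /* Map and lock once; remember the base pointer in orig_pte. */
            orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            for (; addr < end; pte++, addr += PAGE_SIZE) {
                    pte_t ptent = *pte;

                    if (pte_none(ptent))
                            continue;
                    /* ... per-PTE policy work goes here ... */
            }
            /* Unmap/unlock from the original base, not the advanced cursor. */
            pte_unmap_unlock(orig_pte, ptl);
    }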
D | memory.c |
    2588  pte_t *page_table, pte_t orig_pte)  in pte_unmap_same() argument
    2595  same = pte_same(*page_table, orig_pte);  in pte_unmap_same()
    2632  if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {  in cow_user_page()
    2637  if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in cow_user_page()
    2647  entry = pte_mkyoung(vmf->orig_pte);  in cow_user_page()
    2665  if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in cow_user_page()
    2819  flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));  in wp_page_reuse()
    2820  entry = pte_mkyoung(vmf->orig_pte);  in wp_page_reuse()
    2857  if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {  in wp_page_copy()
    2897  if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in wp_page_copy()
    [all …]
|
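In memory.c, vmf->orig_pte is the PTE value captured when the fault was taken; once the page-table lock has been dropped and retaken, the handlers re-check the live entry with pte_same() and bail out if it changed underneath them. A minimal sketch of that re-validation step, assuming kernel-internal context; pte_still_matches is a hypothetical helper, not a function in the file.

    static bool pte_still_matches(struct vm_fault *vmf)
    {
            bool same;

            vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
                                           vmf->address, &vmf->ptl);
            /* vmf->orig_pte was snapshotted before the lock was dropped. */
            same = pte_same(*vmf->pte, vmf->orig_pte);
            pte_unmap_unlock(vmf->pte, vmf->ptl);
            return same;
    }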
D | swap_state.c |
    758  pte_t *pte, *orig_pte;  in swap_ra_info() local
    772  orig_pte = pte = pte_offset_map(vmf->pmd, faddr);  in swap_ra_info()
    775  pte_unmap(orig_pte);  in swap_ra_info()
    790  pte_unmap(orig_pte);  in swap_ra_info()
    815  pte_unmap(orig_pte);  in swap_ra_info()
|
D | ksm.c |
    1035  pte_t *orig_pte)  in write_protect_page() argument
    1101  *orig_pte = *pvmw.pte;  in write_protect_page()
    1122  struct page *kpage, pte_t orig_pte)  in replace_page() argument
    1146  if (!pte_same(*ptep, orig_pte)) {  in replace_page()
    1206  pte_t orig_pte = __pte(0);  in try_to_merge_one_page() local
    1236  if (write_protect_page(vma, page, &orig_pte) == 0) {  in try_to_merge_one_page()
    1253  err = replace_page(vma, page, kpage, orig_pte);  in try_to_merge_one_page()
|
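The ksm.c hits show a two-step handshake: write_protect_page() stores the write-protected PTE into *orig_pte, and replace_page() later refuses to install the shared page unless the live PTE still equals that snapshot. A minimal sketch of the second step, assuming kernel-internal context; replace_if_unchanged is an illustrative name and omits the flush and notifier work the real replace_page() does.

    static int replace_if_unchanged(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep, pte_t orig_pte, pte_t newpte)
    {
            /* The mapping changed since write-protection; caller backs off. */
            if (!pte_same(*ptep, orig_pte))
                    return -EFAULT;

            set_pte_at(mm, addr, ptep, newpte);
            return 0;
    }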
D | khugepaged.c |
    1019  vmf.orig_pte = *vmf.pte;  in __collapse_huge_page_swapin()
    1020  if (!is_swap_pte(vmf.orig_pte))  in __collapse_huge_page_swapin()
|
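Here khugepaged fills in a struct vm_fault by hand: it snapshots the live PTE into vmf.orig_pte and only enters the swap-in path when that snapshot is a swap entry. A minimal sketch of the check, assuming kernel-internal context; swapin_needed is an illustrative helper, not the real function.

    static bool swapin_needed(struct vm_fault *vmf)
    {
            /* Snapshot the current PTE; later code re-validates against it. */
            vmf->orig_pte = *vmf->pte;

            return is_swap_pte(vmf->orig_pte);
    }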
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/ |
D | paging_tmpl.h |
    149  pt_element_t orig_pte, pt_element_t new_pte)  in FNAME()
    163  [old] "+a" (orig_pte),  in FNAME()
    175  [old] "+A" (orig_pte),  in FNAME()
    238  pt_element_t pte, orig_pte;  in FNAME() local
    248  pte = orig_pte = walker->ptes[level - 1];  in FNAME()
    265  if (pte == orig_pte)  in FNAME()
    284  ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);  in FNAME()
|
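In the x86 shadow-paging walker, orig_pte is the guest PTE value read during the walk; accessed/dirty bits are set in a copy, and FNAME(cmpxchg_gpte) writes the copy back only if guest memory still holds orig_pte. The sketch below illustrates the same compare-against-snapshot idea with a plain C11 atomic standing in for the guest-memory cmpxchg; it is not the kernel code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Install new_pte only if *slot still holds the snapshotted orig_pte. */
    static bool update_gpte_if_unchanged(_Atomic uint64_t *slot,
                                         uint64_t orig_pte, uint64_t new_pte)
    {
            return atomic_compare_exchange_strong(slot, &orig_pte, new_pte);
    }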
/kernel/linux/linux-5.10/arch/sparc/mm/ |
D | tlb.c |
    205  pte_t orig_pte = __pte(pmd_val(orig));  in __set_pmd_acct() local
    206  bool exec = pte_exec(orig_pte);  in __set_pmd_acct()
|
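The sparc lines above reuse a generic pte_*() predicate on a PMD by wrapping its raw value in a pte_t, relying on the huge-page PMD sharing the PTE bit layout on that architecture. A minimal sketch of the trick, assuming sparc64 kernel-internal context; pmd_was_exec is an illustrative name.

    static bool pmd_was_exec(pmd_t orig)
    {
            /* Re-interpret the raw pmd bits as a pte so pte_exec() applies. */
            pte_t orig_pte = __pte(pmd_val(orig));

            return pte_exec(orig_pte);
    }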
/kernel/linux/linux-5.10/arch/arm64/kvm/hyp/ |
D | pgtable.c |
    744  kvm_pte_t attr_clr, kvm_pte_t *orig_pte,  in stage2_update_leaf_attrs() argument
    763  if (orig_pte)  in stage2_update_leaf_attrs()
    764  *orig_pte = data.pte;  in stage2_update_leaf_attrs()
|
/kernel/linux/linux-5.10/fs/proc/ |
D | task_mmu.c |
    1451  pte_t *pte, *orig_pte;  in pagemap_pmd_range() local
    1523  orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);  in pagemap_pmd_range()
    1532  pte_unmap_unlock(orig_pte, ptl);  in pagemap_pmd_range()
    1837  pte_t *orig_pte;  in gather_pte_stats() local
    1856  orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in gather_pte_stats()
    1864  pte_unmap_unlock(orig_pte, ptl);  in gather_pte_stats()
|
/kernel/linux/linux-5.10/include/linux/ |
D | pgtable.h | 630 pte_t orig_pte) in arch_unmap_one() argument
|
D | mm.h | 541 pte_t orig_pte; /* Value of PTE at the time of fault */ member
|