Lines matching refs:pmd (identifier references to pmd in mm/huge_memory.c, ~v4.14; the argument/local tags mark where pmd is declared)
470 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
473 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
474 return pmd; in maybe_pmd_mkwrite()
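
The refs at 470-474 cover the entire helper. A minimal reconstruction, assuming the ~v4.14 mm/huge_memory.c that these line numbers suggest (newer kernels pass the vma to pmd_mkwrite() itself):

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd); /* only writable VMAs get a writable PMD */
        return pmd;
}
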
582 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
583 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
610 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
611 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
658 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
662 if (!pmd_none(*pmd)) in set_huge_zero_page()
667 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
668 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
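
set_huge_zero_page() shows the standard huge-PMD install pattern: bail out if the slot is already populated, deposit the preallocated PTE table for a later split, then publish the entry. A reconstruction around the matching lines (the page-table counter call differs across versions):

static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
                struct page *zero_page)
{
        pmd_t entry;

        if (!pmd_none(*pmd))
                return false;           /* somebody else populated the PMD first */
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        if (pgtable)
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        atomic_long_inc(&mm->nr_ptes);  /* mm_inc_nr_ptes(mm) on later kernels */
        return true;
}
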
702 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
705 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
715 haddr, vmf->pmd, zero_page); in do_huge_pmd_anonymous_page()
736 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
743 ptl = pmd_lock(mm, pmd); in insert_pfn_pmd()
753 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pfn_pmd()
757 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
758 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
763 pmd_t *pmd, pfn_t pfn, bool write) in vmf_insert_pfn_pmd() argument
789 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
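
insert_pfn_pmd() builds the entry under the PMD lock for DAX-style PFN mappings; vmf_insert_pfn_pmd() at 789 is the caller, after validating the pgprot. A sketch of the v4.14-era body (non-matching lines filled in and possibly version-dependent):

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
                pgtable_t pgtable)
{
        struct mm_struct *mm = vma->vm_mm;
        pmd_t entry;
        spinlock_t *ptl;

        ptl = pmd_lock(mm, pmd);
        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (pfn_t_devmap(pfn))
                entry = pmd_mkdevmap(entry);
        if (write) {
                entry = pmd_mkyoung(pmd_mkdirty(entry));
                entry = maybe_pmd_mkwrite(entry, vma);
        }
        if (pgtable)    /* deposit so a later split has PTEs to hand out */
                pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, addr, pmd, entry);
        update_mmu_cache_pmd(vma, addr, pmd);
        spin_unlock(ptl);
}
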
849 pmd_t *pmd, int flags) in touch_pmd() argument
853 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
857 pmd, _pmd, flags & FOLL_WRITE)) in touch_pmd()
858 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
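
touch_pmd() only refreshes the accessed (and, for writes, dirty) bits of an already-mapped entry; reconstructed, assuming the post-4.15 signature that takes the GUP flags:

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags)
{
        pmd_t _pmd;

        _pmd = pmd_mkyoung(*pmd);
        if (flags & FOLL_WRITE)
                _pmd = pmd_mkdirty(_pmd);
        if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
                                pmd, _pmd, flags & FOLL_WRITE))
                update_mmu_cache_pmd(vma, addr, pmd);
}
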
862 pmd_t *pmd, int flags) in follow_devmap_pmd() argument
864 unsigned long pfn = pmd_pfn(*pmd); in follow_devmap_pmd()
869 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_devmap_pmd()
877 if (flags & FOLL_WRITE && !pmd_write(*pmd)) in follow_devmap_pmd()
880 if (pmd_present(*pmd) && pmd_devmap(*pmd)) in follow_devmap_pmd()
886 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
912 pmd_t pmd; in copy_huge_pmd() local
929 pmd = *src_pmd; in copy_huge_pmd()
932 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
933 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
935 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
938 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
940 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
941 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
946 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
952 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
961 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
975 src_page = pmd_page(pmd); in copy_huge_pmd()
984 pmd = pmd_mkold(pmd_wrprotect(pmd)); in copy_huge_pmd()
985 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
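
Lines 984-985 are the heart of huge-page COW at fork(): parent and child both end up with the same write-protected, old PMD, so the first write from either side faults into do_huge_pmd_wp_page(). In context (the wrprotect of the source PMD sits on a line the ref filter hides, since src_pmd is a different token):

        pmdp_set_wrprotect(src_mm, addr, src_pmd);      /* parent loses write access */
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);         /* child shares it read-only */
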
1113 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1114 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in huge_pmd_set_accessed()
1121 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1122 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
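
huge_pmd_set_accessed() is the canonical THP fault pattern in miniature: take the PMD lock, recheck with pmd_same() that the entry did not change while the fault ran unlocked, then update in place. Reconstructed from the matching lines plus the ~v4.14 source:

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
        pmd_t entry;
        unsigned long haddr;
        bool write = vmf->flags & FAULT_FLAG_WRITE;

        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
                goto unlock;            /* the PMD changed under us; nothing to do */

        entry = pmd_mkyoung(orig_pmd);
        if (write)
                entry = pmd_mkdirty(entry);
        haddr = vmf->address & HPAGE_PMD_MASK;
        if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
                update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
        spin_unlock(vmf->ptl);
}
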
1181 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1182 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in do_huge_pmd_wp_page_fallback()
1186 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1189 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1209 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1245 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1250 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in do_huge_pmd_wp_page()
1264 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1275 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1276 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1296 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1301 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1313 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1336 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1345 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page()
1349 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_huge_pmd_wp_page()
1350 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
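
The three split_huge_pmd() calls at 1296/1301/1313 are fallback paths; when the faulting task is the only mapper, lines 1275-1276 instead make the existing mapping writable in place. A sketch of that reuse branch, assuming the era's page_trans_huge_mapcount() check:

        if (page_trans_huge_mapcount(page, NULL) == 1) {
                pmd_t entry;

                /* sole mapper: no copy needed, just grant write access */
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
                        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
        }
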
1374 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) in can_follow_write_pmd() argument
1376 return pmd_write(pmd) || in can_follow_write_pmd()
1377 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); in can_follow_write_pmd()
1382 pmd_t *pmd, in follow_trans_huge_pmd() argument
1388 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1390 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) in follow_trans_huge_pmd()
1394 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) in follow_trans_huge_pmd()
1398 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) in follow_trans_huge_pmd()
1401 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1404 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1449 int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) in do_huge_pmd_numa_page() argument
1462 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1463 if (unlikely(!pmd_same(pmd, *vmf->pmd))) in do_huge_pmd_numa_page()
1471 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { in do_huge_pmd_numa_page()
1472 page = pmd_page(*vmf->pmd); in do_huge_pmd_numa_page()
1481 page = pmd_page(pmd); in do_huge_pmd_numa_page()
1492 if (!pmd_savedwrite(pmd)) in do_huge_pmd_numa_page()
1528 if (unlikely(!pmd_same(pmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1563 vmf->pmd, pmd, vmf->address, page, target_nid); in do_huge_pmd_numa_page()
1573 was_writable = pmd_savedwrite(pmd); in do_huge_pmd_numa_page()
1574 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1575 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
1577 pmd = pmd_mkwrite(pmd); in do_huge_pmd_numa_page()
1578 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1579 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
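
Lines 1573-1579 undo the PROT_NONE hinting protection once the NUMA fault has been handled; the connecting "if (was_writable)" carries no pmd token, so the ref filter hides it. Filled in:

        was_writable = pmd_savedwrite(pmd);
        pmd = pmd_modify(pmd, vma->vm_page_prot);       /* drop PROT_NONE */
        pmd = pmd_mkyoung(pmd);
        if (was_writable)
                pmd = pmd_mkwrite(pmd);                 /* restore the saved write bit */
        set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
        update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
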
1600 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
1610 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1614 orig_pmd = *pmd; in madvise_free_huge_pmd()
1653 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1657 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
1658 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
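
For MADV_FREE the mapping itself survives; only the dirty and accessed bits are cleared, so reclaim can later discard the page if nothing re-dirtied it. The sequence around lines 1653-1658:

        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
                pmdp_invalidate(vma, addr, pmd);
                orig_pmd = pmd_mkold(orig_pmd);
                orig_pmd = pmd_mkclean(orig_pmd);
                set_pmd_at(mm, addr, pmd, orig_pmd);
                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
        }
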
1669 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
1673 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
1679 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
1686 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1695 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, in zap_huge_pmd()
1697 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1700 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1705 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1728 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1732 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
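
zap_deposited_table() is called on every teardown path above to free the PTE table that was deposited at fault time. A minimal reconstruction (only the accounting call varies between versions):

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t pgtable;

        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pte_free(mm, pgtable);
        atomic_long_dec(&mm->nr_ptes);  /* mm_dec_nr_ptes(mm) on later kernels */
}
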
1758 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
1761 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
1762 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1763 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
1764 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1766 return pmd; in move_soft_dirty_pmd()
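
The body shown at 1761-1766 is complete except for its config guard; in the source the soft-dirty propagation compiles away on architectures without soft-dirty tracking:

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
        if (unlikely(is_pmd_migration_entry(pmd)))
                pmd = pmd_swp_mksoft_dirty(pmd);
        else if (pmd_present(pmd))
                pmd = pmd_mksoft_dirty(pmd);
#endif
        return pmd;
}
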
1774 pmd_t pmd; in move_huge_pmd() local
1801 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1802 if (pmd_present(pmd)) in move_huge_pmd()
1811 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
1812 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
1829 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1838 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1842 preserve_write = prot_numa && pmd_write(*pmd); in change_huge_pmd()
1846 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
1847 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
1849 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
1858 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
1860 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
1871 if (prot_numa && is_huge_zero_pmd(*pmd)) in change_huge_pmd()
1874 if (prot_numa && pmd_protnone(*pmd)) in change_huge_pmd()
1898 entry = *pmd; in change_huge_pmd()
1899 pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1905 if (pmd_dirty(*pmd)) in change_huge_pmd()
1907 if (pmd_young(*pmd)) in change_huge_pmd()
1914 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
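
Lines 1898-1914 look odd in isolation: *pmd is reread after pmdp_invalidate(). That works because pmdp_invalidate() of this era leaves the entry visible to hardware, so dirty/young bits set by a racing access can still be folded into the new entry before set_pmd_at() (from v4.16 pmdp_invalidate() returns the old PMD instead). Filled in:

        entry = *pmd;
        pmdp_invalidate(vma, addr, pmd);
        if (pmd_dirty(*pmd))            /* pick up bits set during the window */
                entry = pmd_mkdirty(entry);
        if (pmd_young(*pmd))
                entry = pmd_mkyoung(entry);
        entry = pmd_modify(entry, newprot);
        if (preserve_write)
                entry = pmd_mk_savedwrite(entry);
        set_pmd_at(mm, addr, pmd, entry);
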
1927 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1930 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1931 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || in __pmd_trans_huge_lock()
1932 pmd_devmap(*pmd))) in __pmd_trans_huge_lock()
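
The full helper: take the PMD lock and keep it only if the entry is still in one of the huge forms (swap/migration, trans-huge, or devmap); otherwise drop the lock and signal failure with NULL:

spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
        spinlock_t *ptl;

        ptl = pmd_lock(vma->vm_mm, pmd);
        if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
                        pmd_devmap(*pmd)))
                return ptl;
        spin_unlock(ptl);
        return NULL;
}
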
2017 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2025 pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2027 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2040 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
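
Splitting the huge zero page copies nothing: the deposited table is withdrawn, filled with HPAGE_PMD_NR special zero-page PTEs, and reconnected. Reconstructed, assuming the ~v4.14 layout:

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
                unsigned long haddr, pmd_t *pmd)
{
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
        pmd_t _pmd;
        int i;

        /* leave the PMD empty until the PTEs are filled */
        pmdp_huge_clear_flush_notify(vma, haddr, pmd);

        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;

                entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
                entry = pte_mkspecial(entry);
                pte = pte_offset_map(&_pmd, haddr);
                VM_BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
        smp_wmb(); /* make the PTEs visible before the PMD points at them */
        pmd_populate(mm, pmd, pgtable);
}
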
2043 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2057 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) in __split_huge_pmd_locked()
2058 && !pmd_devmap(*pmd)); in __split_huge_pmd_locked()
2063 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2069 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2081 } else if (is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2082 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2086 pmd_migration = is_pmd_migration_entry(*pmd); in __split_huge_pmd_locked()
2090 entry = pmd_to_swp_entry(*pmd); in __split_huge_pmd_locked()
2094 page = pmd_page(*pmd); in __split_huge_pmd_locked()
2097 write = pmd_write(*pmd); in __split_huge_pmd_locked()
2098 young = pmd_young(*pmd); in __split_huge_pmd_locked()
2099 dirty = pmd_dirty(*pmd); in __split_huge_pmd_locked()
2100 soft_dirty = pmd_soft_dirty(*pmd); in __split_huge_pmd_locked()
2102 pmdp_huge_split_prepare(vma, haddr, pmd); in __split_huge_pmd_locked()
2103 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
2179 pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2180 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
2190 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2198 ptl = pmd_lock(mm, pmd); in __split_huge_pmd()
2205 if (page && page != pmd_page(*pmd)) in __split_huge_pmd()
2208 if (pmd_trans_huge(*pmd)) { in __split_huge_pmd()
2209 page = pmd_page(*pmd); in __split_huge_pmd()
2212 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) in __split_huge_pmd()
2214 __split_huge_pmd_locked(vma, pmd, haddr, freeze); in __split_huge_pmd()
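
__split_huge_pmd() is the locked dispatcher around __split_huge_pmd_locked(): it verifies under the PMD lock that the entry is still worth splitting (and, when freezing for migration, that it still maps the expected page). A sketch from the matching lines plus the ~v4.14 source:

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page)
{
        spinlock_t *ptl;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long haddr = address & HPAGE_PMD_MASK;

        mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
        ptl = pmd_lock(mm, pmd);

        /* when freezing, the caller must tell us which page to expect */
        VM_BUG_ON(freeze && !page);
        if (page && page != pmd_page(*pmd))
                goto out;

        if (pmd_trans_huge(*pmd)) {
                page = pmd_page(*pmd);
                if (PageMlocked(page))
                        clear_page_mlock(page);
        } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
                goto out;
        __split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
        spin_unlock(ptl);
        mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
}
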
2226 pmd_t *pmd; in split_huge_pmd_address() local
2240 pmd = pmd_offset(pud, address); in split_huge_pmd_address()
2242 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
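
split_huge_pmd_address() is a plain top-down walk; the pgd/p4d/pud checks carry no pmd token, hence the gap between lines 2226 and 2240:

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(vma->vm_mm, address);
        if (!pgd_present(*pgd))
                return;
        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                return;
        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                return;
        pmd = pmd_offset(pud, address);
        __split_huge_pmd(vma, pmd, address, freeze, page);
}
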
2849 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
2853 pmdval = *pvmw->pmd; in set_pmd_migration_entry()
2854 pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
2861 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
2875 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
2878 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
2881 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
2888 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); in remove_migration_pmd()
2891 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
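
remove_migration_pmd() mirrors set_pmd_migration_entry(): decode the swap entry, rebuild a present PMD for the new page (carrying soft-dirty and writability across), and install it. A sketch from the matching lines plus the ~v4.14 source; the exact ordering of the rmap and mlock handling may differ by version:

void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address = pvmw->address;
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        pmd_t pmde;
        swp_entry_t entry;

        if (!(pvmw->pmd && !pvmw->pte))
                return;         /* not mapped at PMD level here */

        entry = pmd_to_swp_entry(*pvmw->pmd);
        get_page(new);
        pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_write_migration_entry(entry))
                pmde = maybe_pmd_mkwrite(pmde, vma);

        flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
        page_add_anon_rmap(new, vma, mmun_start, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
        if (vma->vm_flags & VM_LOCKED)
                mlock_vma_page(new);
        update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
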