Lines Matching full:pmd
480 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
483 pmd = pmd_mkwrite(pmd); in maybe_pmd_mkwrite()
484 return pmd; in maybe_pmd_mkwrite()
588 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
589 if (unlikely(!pmd_none(*vmf->pmd))) { in __do_huge_pmd_anonymous_page()
616 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
617 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
664 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
668 if (!pmd_none(*pmd)) in set_huge_zero_page()
673 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
674 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
708 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
711 if (pmd_none(*vmf->pmd)) { in do_huge_pmd_anonymous_page()
721 haddr, vmf->pmd, zero_page); in do_huge_pmd_anonymous_page()
742 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, in insert_pfn_pmd() argument
749 ptl = pmd_lock(mm, pmd); in insert_pfn_pmd()
750 if (!pmd_none(*pmd)) { in insert_pfn_pmd()
752 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) { in insert_pfn_pmd()
753 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd)); in insert_pfn_pmd()
756 entry = pmd_mkyoung(*pmd); in insert_pfn_pmd()
758 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1)) in insert_pfn_pmd()
759 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
774 pgtable_trans_huge_deposit(mm, pmd, pgtable); in insert_pfn_pmd()
779 set_pmd_at(mm, addr, pmd, entry); in insert_pfn_pmd()
780 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
817 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
895 pmd_t *pmd, int flags) in touch_pmd() argument
899 _pmd = pmd_mkyoung(*pmd); in touch_pmd()
903 pmd, _pmd, flags & FOLL_WRITE)) in touch_pmd()
904 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
908 pmd_t *pmd, int flags) in follow_devmap_pmd() argument
910 unsigned long pfn = pmd_pfn(*pmd); in follow_devmap_pmd()
915 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_devmap_pmd()
918 * When we COW a devmap PMD entry, we split it into PTEs, so we should in follow_devmap_pmd()
923 if (flags & FOLL_WRITE && !pmd_write(*pmd)) in follow_devmap_pmd()
926 if (pmd_present(*pmd) && pmd_devmap(*pmd)) in follow_devmap_pmd()
932 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
958 pmd_t pmd; in copy_huge_pmd() local
975 pmd = *src_pmd; in copy_huge_pmd()
978 if (unlikely(is_swap_pmd(pmd))) { in copy_huge_pmd()
979 swp_entry_t entry = pmd_to_swp_entry(pmd); in copy_huge_pmd()
981 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in copy_huge_pmd()
984 pmd = swp_entry_to_pmd(entry); in copy_huge_pmd()
986 pmd = pmd_swp_mksoft_dirty(pmd); in copy_huge_pmd()
987 set_pmd_at(src_mm, addr, src_pmd, pmd); in copy_huge_pmd()
992 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
998 if (unlikely(!pmd_trans_huge(pmd))) { in copy_huge_pmd()
1003 * When page table lock is held, the huge zero pmd should not be in copy_huge_pmd()
1004 * under splitting since we don't split the page itself, only pmd to in copy_huge_pmd()
1007 if (is_huge_zero_pmd(pmd)) { in copy_huge_pmd()
1021 src_page = pmd_page(pmd); in copy_huge_pmd()
1030 pmd = pmd_mkold(pmd_wrprotect(pmd)); in copy_huge_pmd()
1031 set_pmd_at(dst_mm, addr, dst_pmd, pmd); in copy_huge_pmd()
1159 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1160 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in huge_pmd_set_accessed()
1167 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1168 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
1228 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1229 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in do_huge_pmd_wp_page_fallback()
1234 * Leave pmd empty until pte is filled note we must notify here as in do_huge_pmd_wp_page_fallback()
1241 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1243 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1262 smp_wmb(); /* make pte visible before pmd */ in do_huge_pmd_wp_page_fallback()
1263 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1304 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1309 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) in do_huge_pmd_wp_page()
1323 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1334 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1335 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1355 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1360 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1372 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1396 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) { in do_huge_pmd_wp_page()
1405 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page()
1409 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_huge_pmd_wp_page()
1410 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1436 * FOLL_FORCE can write to even unwritable pmd's, but only
1439 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) in can_follow_write_pmd() argument
1441 return pmd_write(pmd) || in can_follow_write_pmd()
1442 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); in can_follow_write_pmd()
1447 pmd_t *pmd, in follow_trans_huge_pmd() argument
1453 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1455 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags)) in follow_trans_huge_pmd()
1459 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) in follow_trans_huge_pmd()
1463 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) in follow_trans_huge_pmd()
1466 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1469 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1477 * In most cases the pmd is the only mapping of the page as we in follow_trans_huge_pmd()
1514 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd) in do_huge_pmd_numa_page() argument
1527 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1528 if (unlikely(!pmd_same(pmd, *vmf->pmd))) in do_huge_pmd_numa_page()
1536 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { in do_huge_pmd_numa_page()
1537 page = pmd_page(*vmf->pmd); in do_huge_pmd_numa_page()
1546 page = pmd_page(pmd); in do_huge_pmd_numa_page()
1557 if (!pmd_savedwrite(pmd)) in do_huge_pmd_numa_page()
1591 /* Confirm the PMD did not change while page_table_lock was released */ in do_huge_pmd_numa_page()
1593 if (unlikely(!pmd_same(pmd, *vmf->pmd))) { in do_huge_pmd_numa_page()
1628 vmf->pmd, pmd, vmf->address, page, target_nid); in do_huge_pmd_numa_page()
1638 was_writable = pmd_savedwrite(pmd); in do_huge_pmd_numa_page()
1639 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1640 pmd = pmd_mkyoung(pmd); in do_huge_pmd_numa_page()
1642 pmd = pmd_mkwrite(pmd); in do_huge_pmd_numa_page()
1643 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1644 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1661 * Return true if we do MADV_FREE successfully on entire pmd page.
1665 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
1675 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1679 orig_pmd = *pmd; in madvise_free_huge_pmd()
1718 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1722 set_pmd_at(mm, addr, pmd, orig_pmd); in madvise_free_huge_pmd()
1723 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in madvise_free_huge_pmd()
1734 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) in zap_deposited_table() argument
1738 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in zap_deposited_table()
1744 pmd_t *pmd, unsigned long addr) in zap_huge_pmd() argument
1751 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1760 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, in zap_huge_pmd()
1762 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); in zap_huge_pmd()
1765 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1770 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1790 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); in zap_huge_pmd()
1793 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1797 zap_deposited_table(tlb->mm, pmd); in zap_huge_pmd()
1814 * With split pmd lock we also need to move preallocated in pmd_move_must_withdraw()
1815 * PTE page table if new_pmd is on different PMD page table. in pmd_move_must_withdraw()
1823 static pmd_t move_soft_dirty_pmd(pmd_t pmd) in move_soft_dirty_pmd() argument
1826 if (unlikely(is_pmd_migration_entry(pmd))) in move_soft_dirty_pmd()
1827 pmd = pmd_swp_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1828 else if (pmd_present(pmd)) in move_soft_dirty_pmd()
1829 pmd = pmd_mksoft_dirty(pmd); in move_soft_dirty_pmd()
1831 return pmd; in move_soft_dirty_pmd()
1839 pmd_t pmd; in move_huge_pmd() local
1849 * The destination pmd shouldn't be established, free_pgtables() in move_huge_pmd()
1866 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1867 if (pmd_present(pmd)) in move_huge_pmd()
1876 pmd = move_soft_dirty_pmd(pmd); in move_huge_pmd()
1877 set_pmd_at(mm, new_addr, new_pmd, pmd); in move_huge_pmd()
1890 * - 0 if PMD could not be locked
1891 - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1894 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1903 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1907 preserve_write = prot_numa && pmd_write(*pmd); in change_huge_pmd()
1911 if (is_swap_pmd(*pmd)) { in change_huge_pmd()
1912 swp_entry_t entry = pmd_to_swp_entry(*pmd); in change_huge_pmd()
1914 VM_BUG_ON(!is_pmd_migration_entry(*pmd)); in change_huge_pmd()
1923 if (pmd_swp_soft_dirty(*pmd)) in change_huge_pmd()
1925 set_pmd_at(mm, addr, pmd, newpmd); in change_huge_pmd()
1936 if (prot_numa && is_huge_zero_pmd(*pmd)) in change_huge_pmd()
1939 if (prot_numa && pmd_protnone(*pmd)) in change_huge_pmd()
1944 * to not clear pmd intermittently to avoid race with MADV_DONTNEED in change_huge_pmd()
1952 * pmd_trans_huge(*pmd) == 0 (without ptl) in change_huge_pmd()
1953 * // skip the pmd in change_huge_pmd()
1955 * // pmd is re-established in change_huge_pmd()
1957 * The race makes MADV_DONTNEED miss the huge pmd and not clear it in change_huge_pmd()
1963 entry = pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1969 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
1977 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1982 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1985 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1986 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || in __pmd_trans_huge_lock()
1987 pmd_devmap(*pmd))) in __pmd_trans_huge_lock()
2077 unsigned long haddr, pmd_t *pmd) in __split_huge_zero_page_pmd() argument
2085 * Leave pmd empty until pte is filled note that it is fine to delay in __split_huge_zero_page_pmd()
2087 * replacing a zero pmd write protected page with a zero pte write in __split_huge_zero_page_pmd()
2092 pmdp_huge_clear_flush(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2094 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2106 smp_wmb(); /* make pte visible before pmd */ in __split_huge_zero_page_pmd()
2107 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
2110 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2124 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd) in __split_huge_pmd_locked()
2125 && !pmd_devmap(*pmd)); in __split_huge_pmd_locked()
2130 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2136 zap_deposited_table(mm, pmd); in __split_huge_pmd_locked()
2148 } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { in __split_huge_pmd_locked()
2158 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2162 * Up to this point the pmd is present and huge and userland has the in __split_huge_pmd_locked()
2164 * place). If we overwrite the pmd with the not-huge version pointing in __split_huge_pmd_locked()
2176 * current pmd notpresent (atomically because here the pmd_trans_huge in __split_huge_pmd_locked()
2177 * must remain set at all times on the pmd until the split is complete in __split_huge_pmd_locked()
2178 * for this pmd), then we flush the SMP TLB and finally we write the in __split_huge_pmd_locked()
2179 * non-huge version of the pmd entry with pmd_populate. in __split_huge_pmd_locked()
2181 old_pmd = pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2204 * Withdraw the table only after we mark the pmd entry invalid. in __split_huge_pmd_locked()
2207 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_pmd_locked()
2265 smp_wmb(); /* make pte visible before pmd */ in __split_huge_pmd_locked()
2266 pmd_populate(mm, pmd, pgtable); in __split_huge_pmd_locked()
2276 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2286 ptl = pmd_lock(mm, pmd); in __split_huge_pmd()
2290 * pmd against. Otherwise we can end up replacing wrong page. in __split_huge_pmd()
2296 if (page != pmd_page(*pmd)) in __split_huge_pmd()
2301 if (pmd_trans_huge(*pmd)) { in __split_huge_pmd()
2303 page = pmd_page(*pmd); in __split_huge_pmd()
2306 _pmd = *pmd; in __split_huge_pmd()
2310 if (unlikely(!pmd_same(*pmd, _pmd))) { in __split_huge_pmd()
2321 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd))) in __split_huge_pmd()
2323 __split_huge_pmd_locked(vma, pmd, haddr, freeze); in __split_huge_pmd()
2336 * 3) Split a huge pmd into pte pointing to the same page. No need in __split_huge_pmd()
2351 pmd_t *pmd; in split_huge_pmd_address() local
2365 pmd = pmd_offset(pud, address); in split_huge_pmd_address()
2367 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
2378 * a huge pmd. in vma_adjust_trans_huge()
2388 * a huge pmd. in vma_adjust_trans_huge()
2398 * contain a hugepage: check if we need to split a huge pmd. in vma_adjust_trans_huge()
2979 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
2983 pmdval = pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
2990 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
3004 if (!(pvmw->pmd && !pvmw->pte)) in remove_migration_pmd()
3007 entry = pmd_to_swp_entry(*pvmw->pmd); in remove_migration_pmd()
3010 if (pmd_swp_soft_dirty(*pvmw->pmd)) in remove_migration_pmd()
3020 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); in remove_migration_pmd()
3023 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
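
A pattern that recurs throughout the matches above, whenever a huge pmd is updated in place, is: take the split page-table lock with pmd_lock(), re-check the entry with pmd_same() against the value snapshotted before the lock was acquired, rewrite the entry, and finish with update_mmu_cache_pmd(). The following is a minimal sketch of that pattern, modeled on the huge_pmd_set_accessed() matches above; it is illustrative only, not the kernel's exact code, and the function name is made up for the example.

/*
 * Illustrative sketch of the lock / pmd_same / update / flush pattern
 * visible in the matches above (e.g. huge_pmd_set_accessed). Not actual
 * kernel code; sketch_touch_huge_pmd() is a hypothetical name.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>

static void sketch_touch_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int write = vmf->flags & FAULT_FLAG_WRITE;
	pmd_t entry;

	/* Serialize against concurrent split/zap of this pmd. */
	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);

	/* The entry may have changed between the fault snapshot and here. */
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	/* Mark the huge entry accessed, and dirty on a write fault. */
	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);

	/* Write it back; flush the MMU cache only if the entry changed. */
	if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
unlock:
	spin_unlock(vmf->ptl);
}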