Lines matching +full:wp +full:-content in mm/khugepaged.c
1 // SPDX-License-Identifier: GPL-2.0
89 * struct mm_slot - hash lookup from mm to mm_slot
99 /* pte-mapped THP in this mm */
105 * struct khugepaged_scan - cursor for scanning
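For context, a sketch of the two structures those kernel-doc lines describe, with fields as defined in mm/khugepaged.c around this version; the inline comments are mine, not the kernel's:

    struct mm_slot {
        struct hlist_node hash;     /* entry in mm_slots_hash */
        struct list_head mm_node;   /* entry in khugepaged_scan.mm_head */
        struct mm_struct *mm;       /* the mm this slot tracks */

        /* pte-mapped THP in this mm */
        int nr_pte_mapped_thp;
        unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
    };

    struct khugepaged_scan {
        struct list_head mm_head;   /* all mm_slots queued for scanning */
        struct mm_slot *mm_slot;    /* slot the cursor currently points at */
        unsigned long address;      /* next address to scan within that mm */
    };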
139 return -EINVAL; in scan_sleep_millisecs_store()
167 return -EINVAL; in alloc_sleep_millisecs_store()
194 return -EINVAL; in pages_to_scan_store()
261 if (err || max_ptes_none > HPAGE_PMD_NR - 1) in khugepaged_max_ptes_none_store()
262 return -EINVAL; in khugepaged_max_ptes_none_store()
287 if (err || max_ptes_swap > HPAGE_PMD_NR - 1) in khugepaged_max_ptes_swap_store()
288 return -EINVAL; in khugepaged_max_ptes_swap_store()
314 if (err || max_ptes_shared > HPAGE_PMD_NR - 1) in khugepaged_max_ptes_shared_store()
315 return -EINVAL; in khugepaged_max_ptes_shared_store()
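The max_ptes_* store handlers above all follow one pattern; a minimal sketch of it (the handler name and the knob assigned at the end are illustrative, not the kernel's):

    static ssize_t example_store(struct kobject *kobj,
                                 struct kobj_attribute *attr,
                                 const char *buf, size_t count)
    {
        unsigned long val;
        int err = kstrtoul(buf, 10, &val);

        /* Reject parse failures and anything above HPAGE_PMD_NR - 1,
         * since a PMD covers at most HPAGE_PMD_NR ptes. */
        if (err || val > HPAGE_PMD_NR - 1)
            return -EINVAL;

        khugepaged_max_ptes_none = val;
        return count;
    }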
356 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
368 return -ENOMEM; in hugepage_madvise()
390 return -ENOMEM; in khugepaged_init()
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; in khugepaged_init()
422 if (mm == mm_slot->mm) in get_mm_slot()
431 mm_slot->mm = mm; in insert_to_mm_slots_hash()
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
437 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
446 if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - in hugepage_vma_check()
447 vma->vm_pgoff, HPAGE_PMD_NR)) in hugepage_vma_check()
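The two-line check above (file lines 446-447) rejects file VMAs whose file offset and virtual address disagree modulo a PMD; a small userspace sketch of the same arithmetic, assuming 4 KiB pages so HPAGE_PMD_NR == 512:

    #include <stdbool.h>
    #include <stdio.h>

    /* True when (vm_start >> PAGE_SHIFT) - vm_pgoff is a multiple of
     * HPAGE_PMD_NR, i.e. the file offset mapped at vm_start stays
     * congruent with the virtual address modulo 2 MiB. */
    static bool pmd_suitable(unsigned long vm_start_pfn, unsigned long vm_pgoff)
    {
        const unsigned long hpage_pmd_nr = 512; /* assumes 4 KiB pages */

        return ((vm_start_pfn - vm_pgoff) % hpage_pmd_nr) == 0;
    }

    int main(void)
    {
        /* pfn 0x400 with pgoff 0 is aligned; pgoff 1 shifts the file
         * view by one page and breaks the congruence. Prints "1 0". */
        printf("%d %d\n", pmd_suitable(0x400, 0), pmd_suitable(0x400, 1));
        return 0;
    }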
451 if (shmem_file(vma->vm_file)) in hugepage_vma_check()
459 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file && in hugepage_vma_check()
461 struct inode *inode = vma->vm_file->f_inode; in hugepage_vma_check()
463 return S_ISREG(inode->i_mode); in hugepage_vma_check()
466 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
480 return -ENOMEM; in __khugepaged_enter()
483 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); in __khugepaged_enter()
484 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
496 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); in __khugepaged_enter()
512 * khugepaged only supports read-only files for non-shmem files. in khugepaged_enter_vma_merge()
514 * file-private shmem THP is not supported. in khugepaged_enter_vma_merge()
519 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
520 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
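hstart/hend above round the VMA inward to PMD boundaries: the first expression rounds vm_start up, the second rounds vm_end down. A standalone demonstration with made-up addresses, assuming 2 MiB PMDs:

    #include <stdio.h>

    #define HPAGE_PMD_SIZE 0x200000UL       /* 2 MiB, assuming 4 KiB base pages */
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    int main(void)
    {
        unsigned long vm_start = 0x00301000UL, vm_end = 0x00a01000UL;
        unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        unsigned long hend = vm_end & HPAGE_PMD_MASK;

        /* Prints hstart=0x400000 hend=0xa00000: only [hstart, hend)
         * can hold PMD-mapped huge pages. */
        printf("hstart=%#lx hend=%#lx\n", hstart, hend);
        return 0;
    }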
534 hash_del(&mm_slot->hash); in __khugepaged_exit()
535 list_del(&mm_slot->mm_node); in __khugepaged_exit()
541 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
562 -compound_nr(page)); in release_pte_page()
572 while (--_pte >= pte) { in release_pte_pages()
582 list_del(&page->lru); in release_pte_pages()
683 * Page is in the swap cache and cannot be re-used. in __collapse_huge_page_isolate()
707 list_add_tail(&page->lru, compound_pagelist); in __collapse_huge_page_isolate()
712 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
750 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
760 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
770 * be disabled to update the per-cpu stats in __collapse_huge_page_copy()
778 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
786 list_del(&src_page->lru); in __collapse_huge_page_copy()
884 *hpage = ERR_PTR(-ENOMEM); in khugepaged_alloc_page()
964 * Return 0 if succeeds, otherwise return non-zero
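That comment belongs to hugepage_vma_revalidate(): after mmap_lock is dropped and retaken, the cached vma pointer may be stale. A sketch of the re-lookup it starts with (the SCAN_* values are this file's scan status codes):

    vma = find_vma(mm, address);
    if (!vma)
        return SCAN_VMA_NULL;   /* the mapping vanished while unlocked */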
981 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
982 hend = vma->vm_end & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
985 if (!hugepage_vma_check(vma, vma->vm_flags)) in hugepage_vma_revalidate()
988 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_revalidate()
1046 vmf.pte--; in __collapse_huge_page_swapin()
1136 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1172 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1181 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1189 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
1254 * Always be strict with uffd-wp in khugepaged_scan_pmd()
1359 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
1386 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot()
1392 hash_del(&mm_slot->hash); in collect_mm_slot()
1393 list_del(&mm_slot->mm_node); in collect_mm_slot()
1398 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in collect_mm_slot()
1409 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1421 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) in khugepaged_add_pte_mapped_thp()
1422 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; in khugepaged_add_pte_mapped_thp()
1428 * Try to collapse a pte-mapped THP for mm at address haddr.
1432 * as pmd-mapped.
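The queueing at file lines 1421-1422 above feeds the collapse this comment describes; a sketch of that record step with the locking the real helper takes around it (khugepaged_mm_lock guards the slot hash):

    spin_lock(&khugepaged_mm_lock);
    mm_slot = get_mm_slot(mm);
    /* The array is bounded; entries that would overflow it are simply
     * dropped and will be found again by a later scan. */
    if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
        mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
    spin_unlock(&khugepaged_mm_lock);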
1445 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1446 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE) in collapse_pte_mapped_thp()
1455 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) in collapse_pte_mapped_thp()
1458 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1512 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); in collapse_pte_mapped_thp()
1516 ptl = pmd_lock(vma->vm_mm, pmd); in collapse_pte_mapped_thp()
1534 struct mm_struct *mm = mm_slot->mm; in khugepaged_collapse_pte_mapped_thps()
1537 if (likely(mm_slot->nr_pte_mapped_thp == 0)) in khugepaged_collapse_pte_mapped_thps()
1541 return -EBUSY; in khugepaged_collapse_pte_mapped_thps()
1546 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) in khugepaged_collapse_pte_mapped_thps()
1547 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]); in khugepaged_collapse_pte_mapped_thps()
1550 mm_slot->nr_pte_mapped_thp = 0; in khugepaged_collapse_pte_mapped_thps()
1563 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1565 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that in retract_page_tables()
1567 * mmap_write_lock(mm) as PMD-mapping is likely to be split in retract_page_tables()
1570 * Note that vma->anon_vma check is racy: it can be set up after in retract_page_tables()
1580 if (vma->anon_vma) in retract_page_tables()
1582 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1585 if (vma->vm_end < addr + HPAGE_PMD_SIZE) in retract_page_tables()
1587 mm = vma->vm_mm; in retract_page_tables()
1617 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1620 * - allocate and lock a new huge page;
1621 * - scan page cache replacing old pages with the new one
1625 * - if replacing succeeds:
1629 * - if replacing failed: in collapse_file()
1638 struct address_space *mapping = file->f_mapping; in collapse_file()
1643 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); in collapse_file()
1648 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); in collapse_file()
1665 /* This will be less messy when we use multi-index entries */ in collapse_file()
1681 new_page->index = start; in collapse_file()
1682 new_page->mapping = mapping; in collapse_file()
1685 * At this point the new_page is locked and not up-to-date. in collapse_file()
1699 * hole-punched, and is now completely in collapse_file()
1703 if (!xas_next_entry(&xas, end - 1)) { in collapse_file()
1709 if (!shmem_charge(mapping->host, 1)) { in collapse_file()
1721 if (shmem_getpage(mapping->host, index, &page, in collapse_file()
1736 page_cache_sync_readahead(mapping, &file->f_ra, in collapse_file()
1738 end - index); in collapse_file()
1748 * khugepaged only works on read-only fd, in collapse_file()
1757 * This is a one-off situation. We are not in collapse_file()
1790 * If file was truncated then extended, or hole-punched, before in collapse_file()
1806 * khugepaged only works on read-only fd, so this in collapse_file()
1837 * - we hold a pin on it; in collapse_file()
1838 * - one reference from page cache; in collapse_file()
1839 * - one from isolate_lru_page; in collapse_file()
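The three references enumerated above are what allow collapse_file() to freeze the old page's refcount before replacing it; an abbreviated sketch of that check (the real code also unlocks the xarray and puts the page back on failure):

    /* Expect precisely 3 references: our pin, the page cache, and
     * isolate_lru_page. Anything extra means a concurrent user. */
    if (!page_ref_freeze(page, 3)) {
        result = SCAN_PAGE_COUNT;
        goto out_unlock;
    }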
1852 list_add_tail(&page->lru, &pagelist); in collapse_file()
1885 * need to copy the content and free the old pages. in collapse_file()
1889 while (index < page->index) { in collapse_file()
1893 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), in collapse_file()
1895 list_del(&page->lru); in collapse_file()
1896 page->mapping = NULL; in collapse_file()
1910 page_ref_add(new_page, HPAGE_PMD_NR - 1); in collapse_file()
1916 * Remove pte page tables, so we can re-fault the page as huge. in collapse_file()
1927 mapping->nrpages -= nr_none; in collapse_file()
1930 shmem_uncharge(mapping->host, nr_none); in collapse_file()
1933 xas_for_each(&xas, page, end - 1) { in collapse_file()
1936 if (!page || xas.xa_index < page->index) { in collapse_file()
1939 nr_none--; in collapse_file()
1945 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); in collapse_file()
1948 list_del(&page->lru); in collapse_file()
1960 new_page->mapping = NULL; in collapse_file()
1975 struct address_space *mapping = file->f_mapping; in khugepaged_scan_file()
1976 XA_STATE(xas, &mapping->i_pages, start); in khugepaged_scan_file()
1985 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { in khugepaged_scan_file()
2036 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { in khugepaged_scan_file()
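This threshold reuses khugepaged_max_ptes_none: with 4 KiB pages HPAGE_PMD_NR is 512 and the default (set in khugepaged_init(), file line 393 above) is 511, so only a range with no pages present at all is skipped. A tiny check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
        const int hpage_pmd_nr = 512;                  /* assumes 4 KiB pages */
        const int max_ptes_none = hpage_pmd_nr - 1;    /* default from khugepaged_init() */

        /* One present page already satisfies the scan's minimum. */
        assert(1 >= hpage_pmd_nr - max_ptes_none);
        return 0;
    }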
2083 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2095 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
2103 if (!hugepage_vma_check(vma, vma->vm_flags)) { in khugepaged_scan_mm_slot()
2108 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2109 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2117 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) in khugepaged_scan_mm_slot()
2129 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2130 struct file *file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
2169 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { in khugepaged_scan_mm_slot()
2171 mm_slot->mm_node.next, in khugepaged_scan_mm_slot()
2222 progress += khugepaged_scan_mm_slot(pages - progress, in khugepaged_do_scan()
2312 recommended_min <<= (PAGE_SHIFT - 10); in set_recommended_min_free_kbytes()
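The shift converts pages to kilobytes: multiplying by PAGE_SIZE and dividing by 1024 is a left shift by PAGE_SHIFT - 10. A quick demonstration, assuming 4 KiB pages (PAGE_SHIFT == 12):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int page_shift = 12;     /* 4 KiB pages assumed */
        unsigned long recommended_min = 100;    /* pages */

        recommended_min <<= (page_shift - 10);  /* 100 pages -> 400 KiB */
        printf("%lu KiB\n", recommended_min);
        return 0;
    }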