
Lines Matching +full:wp +full:- +full:content

1 // SPDX-License-Identifier: GPL-2.0
89 * struct mm_slot - hash lookup from mm to mm_slot
99 /* pte-mapped THP in this mm */
105 * struct khugepaged_scan - cursor for scanning
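
The two structures named above drive the whole scan. A minimal sketch of their layout, reconstructed from the matched lines; field names and ordering beyond what appears in this listing are assumptions based on nearby kernel releases:

struct mm_slot {
	struct hlist_node hash;		/* links the slot into mm_slots_hash */
	struct list_head mm_node;	/* links the slot into khugepaged_scan.mm_head */
	struct mm_struct *mm;		/* the mm this slot tracks */

	/* pte-mapped THP in this mm: addresses queued for collapse_pte_mapped_thp() */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

struct khugepaged_scan {
	struct list_head mm_head;	/* all mm_slots registered with khugepaged */
	struct mm_slot *mm_slot;	/* slot currently being scanned */
	unsigned long address;		/* resume point within mm_slot->mm */
};
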
139 return -EINVAL; in scan_sleep_millisecs_store()
167 return -EINVAL; in alloc_sleep_millisecs_store()
194 return -EINVAL; in pages_to_scan_store()
261 if (err || max_ptes_none > HPAGE_PMD_NR-1) in khugepaged_max_ptes_none_store()
262 return -EINVAL; in khugepaged_max_ptes_none_store()
287 if (err || max_ptes_swap > HPAGE_PMD_NR-1) in khugepaged_max_ptes_swap_store()
288 return -EINVAL; in khugepaged_max_ptes_swap_store()
314 if (err || max_ptes_shared > HPAGE_PMD_NR-1) in khugepaged_max_ptes_shared_store()
315 return -EINVAL; in khugepaged_max_ptes_shared_store()
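
The max_ptes_none, max_ptes_swap and max_ptes_shared store handlers above all follow the same sysfs pattern: parse the value, reject anything larger than HPAGE_PMD_NR - 1, then update the tunable. A sketch of that shape using max_ptes_none as the example; only the bounds check appears in the listing, the kstrtoul parsing and the return of count are assumptions from the usual sysfs store idiom:

static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;
	return count;
}
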
356 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
368 return -ENOMEM; in hugepage_madvise()
390 return -ENOMEM; in khugepaged_init()
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; in khugepaged_init()
422 if (mm == mm_slot->mm) in get_mm_slot()
431 mm_slot->mm = mm; in insert_to_mm_slots_hash()
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
437 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
446 if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - in hugepage_vma_check()
447 vma->vm_pgoff, HPAGE_PMD_NR)) in hugepage_vma_check()
451 if (shmem_file(vma->vm_file)) in hugepage_vma_check()
459 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file && in hugepage_vma_check()
461 struct inode *inode = vma->vm_file->f_inode; in hugepage_vma_check()
463 return S_ISREG(inode->i_mode); in hugepage_vma_check()
466 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
480 return -ENOMEM; in __khugepaged_enter()
483 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); in __khugepaged_enter()
484 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
496 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); in __khugepaged_enter()
512 * khugepaged only supports read-only files for non-shmem files. in khugepaged_enter_vma_merge()
514 * file-private shmem THP is not supported. in khugepaged_enter_vma_merge()
519 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
520 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
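
The hstart/hend computation above clips the VMA to its largest PMD-aligned sub-range: vm_start is rounded up and vm_end is rounded down to a huge-page boundary. A worked example with 2 MiB huge pages; the addresses are made up for illustration:

/* HPAGE_PMD_SIZE = 0x200000 (2 MiB), HPAGE_PMD_MASK = ~(HPAGE_PMD_SIZE - 1) */
unsigned long vm_start = 0x7f1234100000;	/* not PMD-aligned */
unsigned long vm_end   = 0x7f1234a34000;	/* not PMD-aligned */

unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;	/* 0x7f1234200000 */
unsigned long hend   = vm_end & HPAGE_PMD_MASK;				/* 0x7f1234a00000 */

/*
 * (hend - hstart) / HPAGE_PMD_SIZE == 4: only these four aligned 2 MiB
 * ranges are candidates for collapse; the unaligned head and tail of the
 * VMA are never touched by khugepaged.
 */
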
534 hash_del(&mm_slot->hash); in __khugepaged_exit()
535 list_del(&mm_slot->mm_node); in __khugepaged_exit()
541 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
562 -compound_nr(page)); in release_pte_page()
572 while (--_pte >= pte) { in release_pte_pages()
582 list_del(&page->lru); in release_pte_pages()
687 * Page is in the swap cache and cannot be re-used. in __collapse_huge_page_isolate()
711 list_add_tail(&page->lru, compound_pagelist); in __collapse_huge_page_isolate()
716 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
754 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
764 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
774 * be disabled to update the per-cpu stats in __collapse_huge_page_copy()
782 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
790 list_del(&src_page->lru); in __collapse_huge_page_copy()
888 *hpage = ERR_PTR(-ENOMEM); in khugepaged_alloc_page()
968 * Return 0 if it succeeds, otherwise return non-zero
985 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
986 hend = vma->vm_end & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
989 if (!hugepage_vma_check(vma, vma->vm_flags)) in hugepage_vma_revalidate()
992 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_revalidate()
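
Putting the matched hugepage_vma_revalidate() lines together: the function re-checks the VMA after the mmap lock has been dropped and re-taken. A rough reconstruction follows; the SCAN_* result codes and the find_vma() lookup are assumptions based on how khugepaged reports failures elsewhere, not lines from this listing:

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* anonymous memory only: a set vm_ops means the VMA is file-backed */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}
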
1050 vmf.pte--; in __collapse_huge_page_swapin()
1140 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1179 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1188 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1196 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
1261 * Always be strict with uffd-wp in khugepaged_scan_pmd()
1366 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
1393 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot()
1399 hash_del(&mm_slot->hash); in collect_mm_slot()
1400 list_del(&mm_slot->mm_node); in collect_mm_slot()
1405 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in collect_mm_slot()
1416 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1428 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) in khugepaged_add_pte_mapped_thp()
1429 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr; in khugepaged_add_pte_mapped_thp()
1435 * Try to collapse a pte-mapped THP for mm at address haddr.
1439 * as pmd-mapped.
1453 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1454 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE) in collapse_pte_mapped_thp()
1463 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) in collapse_pte_mapped_thp()
1466 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1479 * We need to lock the mapping so that from here on, only GUP-fast and in collapse_pte_mapped_thp()
1483 i_mmap_lock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1489 * tables while all the high-level locks are held in write mode. in collapse_pte_mapped_thp()
1533 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); in collapse_pte_mapped_thp()
1538 if (vma->anon_vma) in collapse_pte_mapped_thp()
1539 anon_vma_lock_write(vma->anon_vma); in collapse_pte_mapped_thp()
1550 if (vma->anon_vma) in collapse_pte_mapped_thp()
1551 anon_vma_unlock_write(vma->anon_vma); in collapse_pte_mapped_thp()
1552 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1561 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1567 struct mm_struct *mm = mm_slot->mm; in khugepaged_collapse_pte_mapped_thps()
1570 if (likely(mm_slot->nr_pte_mapped_thp == 0)) in khugepaged_collapse_pte_mapped_thps()
1574 return -EBUSY; in khugepaged_collapse_pte_mapped_thps()
1579 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++) in khugepaged_collapse_pte_mapped_thps()
1580 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]); in khugepaged_collapse_pte_mapped_thps()
1583 mm_slot->nr_pte_mapped_thp = 0; in khugepaged_collapse_pte_mapped_thps()
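
The pte_mapped_thp[] addresses queued by khugepaged_add_pte_mapped_thp() are drained by the function above. A sketch assembled from the matched lines; the trylock/goto structure is an assumption, only the -EBUSY path, the drain loop and the counter reset appear in the listing:

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	/* do not block the scanner: skip this mm if the lock is contended */
	if (!mmap_write_trylock(mm))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
	return 0;
}
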
1596 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1598 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that in retract_page_tables()
1600 * mmap_write_lock(mm) as PMD-mapping is likely to be split in retract_page_tables()
1603 * Note that the vma->anon_vma check is racy: it can be set up after in retract_page_tables()
1614 if (vma->anon_vma) in retract_page_tables()
1616 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1619 if (vma->vm_end < addr + HPAGE_PMD_SIZE) in retract_page_tables()
1621 mm = vma->vm_mm; in retract_page_tables()
1658 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1661 * - allocate and lock a new huge page;
1662 * - scan page cache replacing old pages with the new one
1666 * - if replacing succeeds:
1670 * - if replacing failed: in collapse_file()
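
The collapse_file() comment block outlines the basic scheme; only the top-level bullets are part of the matched output, so the sub-steps in the outline below are an assumed expansion of how the function proceeds, not quoted source:

/*
 * Basic scheme (sub-steps are assumed, not quoted):
 *  - allocate and lock a new huge page;
 *  - scan the page cache, replacing old pages with the new one
 *    + bring pages into the cache and lock/isolate them as needed;
 *    + keep the old pages around in case a rollback is required;
 *  - if replacing succeeds:
 *    + copy the data over and free the old pages;
 *    + unlock the huge page;
 *  - if replacing failed:
 *    + put all pages back and restore the gaps in the page cache;
 *    + unlock and free the huge page.
 */
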
1679 struct address_space *mapping = file->f_mapping; in collapse_file()
1684 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); in collapse_file()
1689 VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); in collapse_file()
1706 /* This will be less messy when we use multi-index entries */ in collapse_file()
1722 new_page->index = start; in collapse_file()
1723 new_page->mapping = mapping; in collapse_file()
1726 * At this point the new_page is locked and not up-to-date. in collapse_file()
1740 * hole-punched, and is now completely in collapse_file()
1744 if (!xas_next_entry(&xas, end - 1)) { in collapse_file()
1750 if (!shmem_charge(mapping->host, 1)) { in collapse_file()
1762 if (shmem_getpage(mapping->host, index, &page, in collapse_file()
1777 page_cache_sync_readahead(mapping, &file->f_ra, in collapse_file()
1779 end - index); in collapse_file()
1789 * khugepaged only works on read-only fd, in collapse_file()
1798 * This is a one-off situation. We are not in collapse_file()
1831 * If file was truncated then extended, or hole-punched, before in collapse_file()
1847 * khugepaged only works on read-only fd, so this in collapse_file()
1878 * - we hold a pin on it; in collapse_file()
1879 * - one reference from page cache; in collapse_file()
1880 * - one from isolate_lru_page; in collapse_file()
1893 list_add_tail(&page->lru, &pagelist); in collapse_file()
1926 * need to copy the content and free the old pages. in collapse_file()
1930 while (index < page->index) { in collapse_file()
1934 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), in collapse_file()
1936 list_del(&page->lru); in collapse_file()
1937 page->mapping = NULL; in collapse_file()
1951 page_ref_add(new_page, HPAGE_PMD_NR - 1); in collapse_file()
1957 * Remove pte page tables, so we can re-fault the page as huge. in collapse_file()
1968 mapping->nrpages -= nr_none; in collapse_file()
1971 shmem_uncharge(mapping->host, nr_none); in collapse_file()
1974 xas_for_each(&xas, page, end - 1) { in collapse_file()
1977 if (!page || xas.xa_index < page->index) { in collapse_file()
1980 nr_none--; in collapse_file()
1986 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); in collapse_file()
1989 list_del(&page->lru); in collapse_file()
2001 new_page->mapping = NULL; in collapse_file()
2016 struct address_space *mapping = file->f_mapping; in khugepaged_scan_file()
2017 XA_STATE(xas, &mapping->i_pages, start); in khugepaged_scan_file()
2026 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { in khugepaged_scan_file()
2077 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { in khugepaged_scan_file()
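
The check above mirrors the anonymous path: collapse is abandoned when too few pages of the range are present. With the default shown earlier (khugepaged_max_ptes_none = HPAGE_PMD_NR - 1) the arithmetic works out as below; the x86-64 value of HPAGE_PMD_NR is an assumption of this example:

/* HPAGE_PMD_NR = 512 on x86-64 (2 MiB huge page / 4 KiB base page) */
int threshold = HPAGE_PMD_NR - khugepaged_max_ptes_none;	/* 512 - 511 = 1 */

/*
 * present < 1: give up only on a completely absent range. Lowering
 * max_ptes_none via sysfs raises the threshold, e.g. with
 * max_ptes_none = 64 at least 448 of the 512 subpages must already be
 * present before khugepaged will build the huge page.
 */
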
2124 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2136 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
2144 if (!hugepage_vma_check(vma, vma->vm_flags)) { in khugepaged_scan_mm_slot()
2149 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2150 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2158 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) in khugepaged_scan_mm_slot()
2170 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2171 struct file *file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
2210 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { in khugepaged_scan_mm_slot()
2212 mm_slot->mm_node.next, in khugepaged_scan_mm_slot()
2263 progress += khugepaged_scan_mm_slot(pages - progress, in khugepaged_do_scan()
2353 recommended_min <<= (PAGE_SHIFT-10); in set_recommended_min_free_kbytes()
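
The final matched line converts a page count into kilobytes for min_free_kbytes: shifting left by PAGE_SHIFT turns pages into bytes, and subtracting 10 from the shift divides by 1024. A small worked example assuming 4 KiB base pages:

/* PAGE_SHIFT = 12 for 4 KiB pages, so PAGE_SHIFT - 10 == 2 */
unsigned long recommended_min = 1024;		/* pages */

recommended_min <<= (PAGE_SHIFT-10);		/* 1024 << 2 == 4096 KiB */
/* i.e. 1024 pages * 4 KiB/page = 4 MiB, expressed in kilobytes */
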