Lines Matching refs:mm

Every line in mm/khugepaged.c that references a struct mm_struct pointer named mm, shown as: source line number, the matched code, and the enclosing definition. The mmap_read_lock()/mmap_write_lock() names place the source at v5.8 or later.

97 struct mm_struct *mm; in struct mm_slot member
417 static struct mm_slot *get_mm_slot(struct mm_struct *mm) in get_mm_slot() argument
421 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) in get_mm_slot()
422 if (mm == mm_slot->mm) in get_mm_slot()
428 static void insert_to_mm_slots_hash(struct mm_struct *mm, in insert_to_mm_slots_hash() argument
431 mm_slot->mm = mm; in insert_to_mm_slots_hash()
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
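
The two hash helpers are nearly complete in the matched lines alone. Filled out with the few elided lines (a reconstruction, not a verbatim quote), they show that khugepaged keys its mm-to-mm_slot table on the pointer value of the mm itself:

    static struct mm_slot *get_mm_slot(struct mm_struct *mm)
    {
            struct mm_slot *mm_slot;

            /* The hash key is the mm pointer itself. */
            hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                    if (mm == mm_slot->mm)
                            return mm_slot;

            return NULL;
    }

    static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                        struct mm_slot *mm_slot)
    {
            mm_slot->mm = mm;
            hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
    }

The (long) cast in hash_add() and the (unsigned long) cast in the lookup hash to the same bucket; both helpers run under khugepaged_mm_lock.
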
435 static inline int khugepaged_test_exit(struct mm_struct *mm) in khugepaged_test_exit() argument
437 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
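
khugepaged_test_exit() is shown in full. It tests mm_users, not mm_count: once the last user reference is gone, exit_mmap() is imminent and khugepaged must stop touching the address space. The mmgrab() at line 500 raises only mm_count, which keeps the mm_struct allocated without keeping the address space alive:

    static inline int khugepaged_test_exit(struct mm_struct *mm)
    {
            /* No user references left: the process is exiting. */
            return atomic_read(&mm->mm_users) == 0;
    }
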
474 int __khugepaged_enter(struct mm_struct *mm) in __khugepaged_enter() argument
484 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm); in __khugepaged_enter()
485 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
491 insert_to_mm_slots_hash(mm, mm_slot); in __khugepaged_enter()
500 mmgrab(mm); in __khugepaged_enter()
527 void __khugepaged_exit(struct mm_struct *mm) in __khugepaged_exit() argument
533 mm_slot = get_mm_slot(mm); in __khugepaged_exit()
542 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
544 mmdrop(mm); in __khugepaged_exit()
554 mmap_write_lock(mm); in __khugepaged_exit()
555 mmap_write_unlock(mm); in __khugepaged_exit()
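
A reconstruction of __khugepaged_exit() (hedged: the branch structure and slot-freeing lines are filled in from the surrounding source). The notable part is the empty write-lock/unlock pair at lines 554-555, which is a barrier, not a critical section:

    void __khugepaged_exit(struct mm_struct *mm)
    {
            struct mm_slot *mm_slot;
            int free = 0;

            spin_lock(&khugepaged_mm_lock);
            mm_slot = get_mm_slot(mm);
            if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
                    /* Not under scan: unhash now, free below. */
                    hash_del(&mm_slot->hash);
                    list_del(&mm_slot->mm_node);
                    free = 1;
            }
            spin_unlock(&khugepaged_mm_lock);

            if (free) {
                    clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
                    free_mm_slot(mm_slot);
                    mmdrop(mm);
            } else if (mm_slot) {
                    /*
                     * The mm is mid-scan and khugepaged works on it with
                     * mmap_lock held in read mode. Taking and releasing
                     * the write lock makes exit wait until the scanner is
                     * done before the caller destroys the page tables.
                     */
                    mmap_write_lock(mm);
                    mmap_write_unlock(mm);
            }
    }
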
973 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, in hugepage_vma_revalidate() argument
979 if (unlikely(khugepaged_test_exit(mm))) in hugepage_vma_revalidate()
982 *vmap = vma = find_vma(mm, address); in hugepage_vma_revalidate()
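
hugepage_vma_revalidate() exists because the collapse paths drop mmap_lock (to allocate, or inside do_swap_page()), and anything learned about the VMA before that point is stale. A sketch under the same caveat; the SCAN_* codes and the HPAGE_PMD_MASK alignment checks follow the surrounding source:

    static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                    struct vm_area_struct **vmap)
    {
            struct vm_area_struct *vma;
            unsigned long hstart, hend;

            if (unlikely(khugepaged_test_exit(mm)))
                    return SCAN_ANY_PROCESS;

            *vmap = vma = find_vma(mm, address);
            if (!vma)
                    return SCAN_VMA_NULL;

            /* The address must fall in a PMD-aligned window inside the vma. */
            hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
            hend = vma->vm_end & HPAGE_PMD_MASK;
            if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                    return SCAN_ADDRESS_RANGE;
            if (!hugepage_vma_check(vma, vma->vm_flags))
                    return SCAN_VMA_CHECK;
            return 0;
    }
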
1006 static bool __collapse_huge_page_swapin(struct mm_struct *mm, in __collapse_huge_page_swapin() argument
1037 mmap_read_lock(mm); in __collapse_huge_page_swapin()
1038 if (hugepage_vma_revalidate(mm, haddr, &vma)) { in __collapse_huge_page_swapin()
1040 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); in __collapse_huge_page_swapin()
1044 if (mm_find_pmd(mm, haddr) != pmd) { in __collapse_huge_page_swapin()
1045 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); in __collapse_huge_page_swapin()
1050 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); in __collapse_huge_page_swapin()
1059 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); in __collapse_huge_page_swapin()
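
__collapse_huge_page_swapin() faults the range's swapped-out pages back in with do_swap_page(), which may drop mmap_lock (VM_FAULT_RETRY). Matched lines 1037-1045 are the recheck that follows; assembled with minimal glue (ret, haddr and the loop around this fragment are context from the full function):

    if (ret & VM_FAULT_RETRY) {
            /* do_swap_page() dropped mmap_lock: retake and revalidate. */
            mmap_read_lock(mm);
            if (hugepage_vma_revalidate(mm, haddr, &vma)) {
                    /* The vma changed underneath us: give up. */
                    trace_mm_collapse_huge_page_swapin(mm, swapped_in,
                                                       referenced, 0);
                    return false;
            }
            /* Check that the pmd is still the one we sampled. */
            if (mm_find_pmd(mm, haddr) != pmd) {
                    trace_mm_collapse_huge_page_swapin(mm, swapped_in,
                                                       referenced, 0);
                    return false;
            }
    }
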
1063 static void collapse_huge_page(struct mm_struct *mm, in collapse_huge_page() argument
1090 mmap_read_unlock(mm); in collapse_huge_page()
1097 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_huge_page()
1103 mmap_read_lock(mm); in collapse_huge_page()
1104 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
1106 mmap_read_unlock(mm); in collapse_huge_page()
1110 pmd = mm_find_pmd(mm, address); in collapse_huge_page()
1113 mmap_read_unlock(mm); in collapse_huge_page()
1122 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address, in collapse_huge_page()
1124 mmap_read_unlock(mm); in collapse_huge_page()
1128 mmap_read_unlock(mm); in collapse_huge_page()
1134 mmap_write_lock(mm); in collapse_huge_page()
1135 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
1139 if (mm_find_pmd(mm, address) != pmd) in collapse_huge_page()
1145 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, in collapse_huge_page()
1150 pte_ptl = pte_lockptr(mm, pmd); in collapse_huge_page()
1152 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ in collapse_huge_page()
1180 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); in collapse_huge_page()
1214 pgtable_trans_huge_deposit(mm, pmd, pgtable); in collapse_huge_page()
1215 set_pmd_at(mm, address, pmd, _pmd); in collapse_huge_page()
1225 mmap_write_unlock(mm); in collapse_huge_page()
1229 trace_mm_collapse_huge_page(mm, isolated, result); in collapse_huge_page()
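
Read in order, the matched lines trace collapse_huge_page()'s lock choreography. mmap_lock has no in-place read-to-write upgrade, so every reacquisition is followed by a fresh revalidation (a summary of the listing above, with its line numbers; not new code):

    /*
     * collapse_huge_page(), mm-related calls in source order:
     *
     * 1090  mmap_read_unlock(mm)          the huge-page allocation and the
     * 1097  mem_cgroup_charge()           memcg charge may sleep: run unlocked
     * 1103  mmap_read_lock(mm)            retake, then revalidate the vma
     * 1104  hugepage_vma_revalidate()     (bail out via 1106 on failure)
     * 1110  mm_find_pmd()                 and the pmd (bail via 1113)
     * 1122  __collapse_huge_page_swapin() may cycle the lock again (1124)
     * 1128  mmap_read_unlock(mm)          the only possible "upgrade":
     * 1134  mmap_write_lock(mm)           drop read, take write,
     * 1135  hugepage_vma_revalidate()     revalidate once more (1139: pmd)
     * 1145  mmu_notifier_range_init()     the rewrite itself happens under
     * 1150  pte_lockptr(), 1152 pmd_lock()  notifier and page-table locks
     * 1180  pmd_populate()                failure path: put the old PTE table
     *                                     back if page isolation failed
     * 1214  pgtable_trans_huge_deposit()  success path: deposit the table,
     * 1215  set_pmd_at()                  install the huge pmd
     * 1225  mmap_write_unlock(mm)
     */
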
1235 static int khugepaged_scan_pmd(struct mm_struct *mm, in khugepaged_scan_pmd() argument
1252 pmd = mm_find_pmd(mm, address); in khugepaged_scan_pmd()
1259 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in khugepaged_scan_pmd()
1387 collapse_huge_page(mm, address, hpage, node, in khugepaged_scan_pmd()
1391 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, in khugepaged_scan_pmd()
1398 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot() local
1402 if (khugepaged_test_exit(mm)) { in collect_mm_slot()
1415 mmdrop(mm); in collect_mm_slot()
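
collect_mm_slot() is the garbage-collection half of the enter/exit protocol: when the scanner notices a dead mm (khugepaged_test_exit() true) it unhashes the slot and drops the mm_count reference taken at enter time. A reconstruction, with the lockdep assertion and comments following the surrounding source:

    static void collect_mm_slot(struct mm_slot *mm_slot)
    {
            struct mm_struct *mm = mm_slot->mm;

            lockdep_assert_held(&khugepaged_mm_lock);

            if (khugepaged_test_exit(mm)) {
                    /* free mm_slot */
                    hash_del(&mm_slot->hash);
                    list_del(&mm_slot->mm_node);

                    /*
                     * Clearing MMF_VM_HUGEPAGE is not strictly needed
                     * here because the mm has already exited.
                     */

                    free_mm_slot(mm_slot);
                    mmdrop(mm);   /* pairs with mmgrab() in __khugepaged_enter() */
            }
    }
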
1424 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm, in khugepaged_add_pte_mapped_thp() argument
1432 mm_slot = get_mm_slot(mm); in khugepaged_add_pte_mapped_thp()
1446 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) in collapse_pte_mapped_thp() argument
1449 struct vm_area_struct *vma = find_vma(mm, haddr); in collapse_pte_mapped_thp()
1479 pmd = mm_find_pmd(mm, haddr); in collapse_pte_mapped_thp()
1498 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); in collapse_pte_mapped_thp()
1548 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr, in collapse_pte_mapped_thp()
1553 mm_dec_nr_ptes(mm); in collapse_pte_mapped_thp()
1556 pte_free(mm, pmd_pgtable(_pmd)); in collapse_pte_mapped_thp()
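
collapse_pte_mapped_thp() retires a now-redundant PTE table: if every pte in the PMD-sized range maps the expected subpage of one compound page, the table can be removed, and the next fault maps the THP with a huge pmd. Its mm-side steps, summarized from the matched lines:

    /*
     * collapse_pte_mapped_thp(), per the matched lines:
     *
     * 1449  find_vma(mm, haddr)        locate the vma for the aligned address
     * 1479  mm_find_pmd(mm, haddr)     the pmd whose PTE table may be retired
     * 1498  pte_offset_map_lock()      walk the ptes, verifying each maps the
     *                                  expected subpage of one compound page
     * 1548  mmu_notifier_range_init()  then clear the pmd under the notifier,
     * 1553  mm_dec_nr_ptes(mm)         account the removed table, and
     * 1556  pte_free()                 free it
     */
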
1576 struct mm_struct *mm = mm_slot->mm; in khugepaged_collapse_pte_mapped_thps() local
1582 if (!mmap_write_trylock(mm)) in khugepaged_collapse_pte_mapped_thps()
1585 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_collapse_pte_mapped_thps()
1589 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]); in khugepaged_collapse_pte_mapped_thps()
1593 mmap_write_unlock(mm); in khugepaged_collapse_pte_mapped_thps()
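
khugepaged_collapse_pte_mapped_thps() retries the pmds that retract_page_tables() (next cluster) could not handle. The trylock is deliberate: khugepaged is a best-effort background thread, so a busy mm is simply skipped and retried on a later pass. A reconstruction, with the nr_pte_mapped_thp bookkeeping filled in from the surrounding source:

    static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
    {
            struct mm_struct *mm = mm_slot->mm;
            int i;

            if (!mmap_write_trylock(mm))
                    return;         /* mm is busy: retry on a later pass */

            if (unlikely(khugepaged_test_exit(mm)))
                    goto out;

            for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
                    collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

    out:
            mm_slot->nr_pte_mapped_thp = 0;
            mmap_write_unlock(mm);
    }
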
1600 struct mm_struct *mm; in retract_page_tables() local
1630 mm = vma->vm_mm; in retract_page_tables()
1631 pmd = mm_find_pmd(mm, addr); in retract_page_tables()
1641 if (mmap_write_trylock(mm)) { in retract_page_tables()
1642 if (!khugepaged_test_exit(mm)) { in retract_page_tables()
1648 NULL, mm, addr, in retract_page_tables()
1654 mm_dec_nr_ptes(mm); in retract_page_tables()
1656 pte_free(mm, pmd_pgtable(_pmd)); in retract_page_tables()
1659 mmap_write_unlock(mm); in retract_page_tables()
1662 khugepaged_add_pte_mapped_thp(mm, addr); in retract_page_tables()
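
retract_page_tables() runs from the file-collapse path while holding the mapping's i_mmap lock, so it cannot sleep waiting for mmap_lock; on trylock failure it defers the pmd to khugepaged instead. Matched lines 1641-1662 with the elided body filled in (hedged like the other reconstructions; vma, pmd, _pmd and addr are context from the enclosing loop):

    if (mmap_write_trylock(mm)) {
            if (!khugepaged_test_exit(mm)) {
                    struct mmu_notifier_range range;

                    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR,
                                            0, NULL, mm, addr,
                                            addr + HPAGE_PMD_SIZE);
                    mmu_notifier_invalidate_range_start(&range);
                    /* the PTE table is expected to be empty here */
                    _pmd = pmdp_collapse_flush(vma, addr, pmd);
                    mmu_notifier_invalidate_range_end(&range);
                    mm_dec_nr_ptes(mm);
                    pte_free(mm, pmd_pgtable(_pmd));
            }
            mmap_write_unlock(mm);
    } else {
            /* Couldn't take the lock: let khugepaged retry later. */
            khugepaged_add_pte_mapped_thp(mm, addr);
    }
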
1686 static void collapse_file(struct mm_struct *mm, in collapse_file() argument
1711 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) { in collapse_file()
2036 static void khugepaged_scan_file(struct mm_struct *mm, in khugepaged_scan_file() argument
2105 collapse_file(mm, file, start, hpage, node); in khugepaged_scan_file()
2112 static void khugepaged_scan_file(struct mm_struct *mm, in khugepaged_scan_file() argument
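
In the file-collapse path the mm matters much less: the pages live in the page cache rather than in any one address space, so collapse_file() touches the mm only for the memcg charge at 1711, and retract_page_tables() (above) handles the per-mm page-table work. The second khugepaged_scan_file() definition at 2112 appears to be the stub compiled when CONFIG_SHMEM is off:

    static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
                                     pgoff_t start, struct page **hpage)
    {
            /* File collapse requires CONFIG_SHMEM; this path is unreachable. */
            BUILD_BUG();
    }
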
2130 struct mm_struct *mm; in khugepaged_scan_mm_slot() local
2148 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2154 if (unlikely(!mmap_read_trylock(mm))) in khugepaged_scan_mm_slot()
2156 if (likely(!khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2157 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2164 if (unlikely(khugepaged_test_exit(mm))) { in khugepaged_scan_mm_slot()
2188 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2199 mmap_read_unlock(mm); in khugepaged_scan_mm_slot()
2201 khugepaged_scan_file(mm, file, pgoff, hpage); in khugepaged_scan_mm_slot()
2204 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2219 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */ in khugepaged_scan_mm_slot()
2228 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
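
khugepaged_scan_mm_slot() is the scan loop that drives everything above. Its mm handling, summarized from the matched lines:

    /*
     * khugepaged_scan_mm_slot(), per the matched lines:
     *
     * 2148  mm = mm_slot->mm            current slot's address space
     * 2154  mmap_read_trylock(mm)       best effort: skip the mm if busy
     * 2156  khugepaged_test_exit(mm)    checked before find_vma (2157) and
     *                                   again at 2164 and 2188 in the vma walk
     * 2199  mmap_read_unlock(mm)        file-backed vma: drop mmap_lock, then
     * 2201  khugepaged_scan_file()      scan the mapping's page cache
     * 2204  khugepaged_scan_pmd()       anonymous vma: scan under the lock;
     *                                   the return value reports whether it
     *                                   released mmap_lock itself
     * 2219  mmap_read_unlock(mm)        "exit_mmap will destroy ptes after this"
     * 2228  khugepaged_test_exit(mm) || !vma
     *                                   done with this mm: advance the scan
     *                                   cursor to the next slot
     */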