Lines Matching refs:mm

85 struct mm_struct *mm; member
713 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, in __do_huge_pmd_anonymous_page() argument
724 if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg)) in __do_huge_pmd_anonymous_page()
727 pgtable = pte_alloc_one(mm, haddr); in __do_huge_pmd_anonymous_page()
741 ptl = pmd_lock(mm, pmd); in __do_huge_pmd_anonymous_page()
746 pte_free(mm, pgtable); in __do_huge_pmd_anonymous_page()
754 pgtable_trans_huge_deposit(mm, pmd, pgtable); in __do_huge_pmd_anonymous_page()
755 set_pmd_at(mm, haddr, pmd, entry); in __do_huge_pmd_anonymous_page()
756 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
757 atomic_long_inc(&mm->nr_ptes); in __do_huge_pmd_anonymous_page()
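
The hits at 713-757 trace the common sequence for installing an anonymous huge page: charge the page to the memcg, pre-allocate a pte page table, take the PMD lock, then deposit the pgtable and write the PMD while bumping the mm counters. Below is a condensed sketch of that flow built only from the calls listed above; the mk_huge_pmd() helper, the VM_FAULT_* return codes, and the memcg commit/rmap/LRU steps are assumptions or elisions, not part of the listing.

/*
 * Hedged sketch of the flow visible at lines 713-757. Error unwinding,
 * the memcg commit, rmap and LRU insertion are elided.
 */
static int sketch_install_huge_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long haddr, pmd_t *pmd,
				   struct page *page)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;
	pmd_t entry;

	/* Charge the compound page before it becomes visible in the page tables. */
	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
		return VM_FAULT_FALLBACK;

	/* Reserve a pte page now so a later split of this PMD cannot fail on allocation. */
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable))
		return VM_FAULT_OOM;

	entry = mk_huge_pmd(page, vma->vm_page_prot);	/* assumed helper, not in the listing */

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		/* Raced with another populator: back out and free the reserved pgtable. */
		spin_unlock(ptl);
		pte_free(mm, pgtable);
		return 0;
	}
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* stash the pte page under the pmd */
	set_pmd_at(mm, haddr, pmd, entry);
	add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
	atomic_long_inc(&mm->nr_ptes);
	spin_unlock(ptl);
	return 0;
}

The same deposit/set/count pattern repeats in set_huge_zero_page() (779-791) and in the zero-page branch of do_huge_pmd_anonymous_page() (795-839), only without the MM_ANONPAGES accounting for the shared zero page.
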
779 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, in set_huge_zero_page() argument
789 pgtable_trans_huge_deposit(mm, pmd, pgtable); in set_huge_zero_page()
790 set_pmd_at(mm, haddr, pmd, entry); in set_huge_zero_page()
791 atomic_long_inc(&mm->nr_ptes); in set_huge_zero_page()
795 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
814 pgtable = pte_alloc_one(mm, haddr); in do_huge_pmd_anonymous_page()
819 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
823 ptl = pmd_lock(mm, pmd); in do_huge_pmd_anonymous_page()
824 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, in do_huge_pmd_anonymous_page()
828 pte_free(mm, pgtable); in do_huge_pmd_anonymous_page()
839 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) { in do_huge_pmd_anonymous_page()
924 void huge_pmd_set_accessed(struct mm_struct *mm, in huge_pmd_set_accessed() argument
934 ptl = pmd_lock(mm, pmd); in huge_pmd_set_accessed()
978 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, in do_huge_pmd_wp_page_fallback() argument
1006 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
1032 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1034 ptl = pmd_lock(mm, pmd); in do_huge_pmd_wp_page_fallback()
1042 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in do_huge_pmd_wp_page_fallback()
1043 pmd_populate(mm, &_pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1056 set_pte_at(mm, haddr, pte, entry); in do_huge_pmd_wp_page_fallback()
1062 pmd_populate(mm, pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1066 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1076 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1087 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_wp_page() argument
1098 ptl = pmd_lockptr(mm, pmd); in do_huge_pmd_wp_page()
1133 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, in do_huge_pmd_wp_page()
1145 if (unlikely(mem_cgroup_try_charge(new_page, mm, in do_huge_pmd_wp_page()
1168 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1186 set_pmd_at(mm, haddr, pmd, entry); in do_huge_pmd_wp_page()
1189 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1200 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1223 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd() local
1226 assert_spin_locked(pmd_lockptr(mm, pmd)); in follow_trans_huge_pmd()
1268 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_numa_page() argument
1281 ptl = pmd_lock(mm, pmdp); in do_huge_pmd_numa_page()
1370 migrated = migrate_misplaced_transhuge_page(mm, vma, in do_huge_pmd_numa_page()
1381 set_pmd_at(mm, haddr, pmdp, pmd); in do_huge_pmd_numa_page()
1414 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); in zap_huge_pmd()
1416 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); in zap_huge_pmd()
1418 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
1425 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); in zap_huge_pmd()
1427 atomic_long_dec(&tlb->mm->nr_ptes); in zap_huge_pmd()
1431 pte_free(tlb->mm, pgtable); in zap_huge_pmd()
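
The zap_huge_pmd() hits at 1414-1431 are the mirror image of the install path: clear the PMD, withdraw the deposited pgtable, fix up the counters (the huge-zero-page case skips the MM_ANONPAGES adjustment, which is why nr_ptes is decremented on both branches at 1418 and 1427), and free the pte page. A rough sketch of that ordering follows; the pmd locking, the is_huge_zero_pmd() test, and the TLB/rmap handling are assumptions around the listed calls.

/*
 * Sketch of the teardown ordering at 1414-1431. The real function takes
 * the pmd lock first and hands the page to the mmu_gather for freeing;
 * both are omitted here.
 */
static void sketch_zap_huge_pmd(struct mmu_gather *tlb, unsigned long addr,
				pmd_t *pmd)
{
	pgtable_t pgtable;
	pmd_t orig_pmd;

	orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
	pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);	/* reclaim the deposited pte page */

	if (is_huge_zero_pmd(orig_pmd)) {		/* assumed predicate */
		/* The shared huge zero page was never counted in MM_ANONPAGES. */
		atomic_long_dec(&tlb->mm->nr_ptes);
	} else {
		/* rmap removal and freeing of the page itself are not shown. */
		add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		atomic_long_dec(&tlb->mm->nr_ptes);
	}
	pte_free(tlb->mm, pgtable);
}
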
1466 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd() local
1489 new_ptl = pmd_lockptr(mm, new_pmd); in move_huge_pmd()
1492 pmd = pmdp_get_and_clear(mm, old_addr, old_pmd); in move_huge_pmd()
1497 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); in move_huge_pmd()
1498 pgtable_trans_huge_deposit(mm, new_pmd, pgtable); in move_huge_pmd()
1500 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); in move_huge_pmd()
1518 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd() local
1526 entry = pmdp_get_and_clear(mm, addr, pmd); in change_huge_pmd()
1531 set_pmd_at(mm, addr, pmd, entry); in change_huge_pmd()
1544 pmdp_set_numa(mm, addr, pmd); in change_huge_pmd()
1589 struct mm_struct *mm, in page_check_address_pmd() argument
1601 pgd = pgd_offset(mm, address); in page_check_address_pmd()
1609 *ptl = pmd_lock(mm, pmd); in page_check_address_pmd()
1638 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting() local
1646 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1647 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_splitting()
1661 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_splitting()
1790 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map() local
1797 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_map()
1800 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_page_map()
1801 pmd_populate(mm, &_pmd, pgtable); in __split_huge_page_map()
1822 set_pte_at(mm, haddr, pte, entry); in __split_huge_page_map()
1854 pmd_populate(mm, pmd, pgtable); in __split_huge_page_map()
2029 static struct mm_slot *get_mm_slot(struct mm_struct *mm) in get_mm_slot() argument
2033 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) in get_mm_slot()
2034 if (mm == mm_slot->mm) in get_mm_slot()
2040 static void insert_to_mm_slots_hash(struct mm_struct *mm, in insert_to_mm_slots_hash() argument
2043 mm_slot->mm = mm; in insert_to_mm_slots_hash()
2044 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); in insert_to_mm_slots_hash()
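
Lines 2029-2044 are khugepaged's bookkeeping for which address spaces it should scan: a hash table keyed on the mm_struct pointer. Reconstructed from the listed lines, the two helpers look roughly as below; the mm_slot layout, the hash-table declaration, and its size are assumed from context rather than taken from the listing.

#include <linux/hashtable.h>

/* Assumed shape of the slot and table. */
struct mm_slot {
	struct hlist_node hash;		/* link in mm_slots_hash */
	struct list_head mm_node;	/* link in khugepaged's scan list */
	struct mm_struct *mm;
};
static DEFINE_HASHTABLE(mm_slots_hash, 10);	/* bucket count is an assumption */

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	/* Walk only the bucket that the mm pointer hashes into. */
	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

Hashing on the pointer value works here because a slot is only ever looked up for an mm that was registered with the very same pointer, so no identifier beyond the address is needed.
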
2047 static inline int khugepaged_test_exit(struct mm_struct *mm) in khugepaged_test_exit() argument
2049 return atomic_read(&mm->mm_users) == 0; in khugepaged_test_exit()
2052 int __khugepaged_enter(struct mm_struct *mm) in __khugepaged_enter() argument
2062 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); in __khugepaged_enter()
2063 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { in __khugepaged_enter()
2069 insert_to_mm_slots_hash(mm, mm_slot); in __khugepaged_enter()
2078 atomic_inc(&mm->mm_count); in __khugepaged_enter()
2105 void __khugepaged_exit(struct mm_struct *mm) in __khugepaged_exit() argument
2111 mm_slot = get_mm_slot(mm); in __khugepaged_exit()
2120 clear_bit(MMF_VM_HUGEPAGE, &mm->flags); in __khugepaged_exit()
2122 mmdrop(mm); in __khugepaged_exit()
2132 down_write(&mm->mmap_sem); in __khugepaged_exit()
2133 up_write(&mm->mmap_sem); in __khugepaged_exit()
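
The enter/exit hits at 2047-2133 describe the lifecycle rules: registering an mm with khugepaged takes an mm_count reference, so the mm_struct stays allocated even after the address space is torn down (which khugepaged_test_exit() detects via mm_users), and the exit path either drops that reference or, if khugepaged is scanning this mm right now, uses an empty down_write()/up_write() pair on mmap_sem as a barrier to wait it out. A trimmed sketch follows; alloc_mm_slot()/free_mm_slot() are assumed helpers, slot_is_being_scanned() is a hypothetical stand-in for the real check against the scan cursor, and the slot-list locking is elided.

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	/* True once exit_mmap() runs: no users of the address space remain. */
	return atomic_read(&mm->mm_users) == 0;
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot = alloc_mm_slot();	/* assumed allocator */

	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not be running on this mm yet. */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);			/* already registered */
		return 0;
	}

	insert_to_mm_slots_hash(mm, mm_slot);		/* slot-list locking elided */
	atomic_inc(&mm->mm_count);	/* pin the mm_struct itself, not the address space */
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot = get_mm_slot(mm);

	if (mm_slot && !slot_is_being_scanned(mm_slot)) {	/* hypothetical predicate */
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mmdrop(mm);	/* drop the reference taken in __khugepaged_enter() */
	} else if (mm_slot) {
		/*
		 * khugepaged holds mmap_sem for read on this mm; taking it
		 * for write and releasing it waits for the scan to finish.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
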
2331 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, in khugepaged_alloc_page() argument
2343 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2400 *khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, in khugepaged_alloc_page() argument
2404 up_read(&mm->mmap_sem); in khugepaged_alloc_page()
2423 static void collapse_huge_page(struct mm_struct *mm, in collapse_huge_page() argument
2443 new_page = khugepaged_alloc_page(hpage, mm, vma, address, node); in collapse_huge_page()
2447 if (unlikely(mem_cgroup_try_charge(new_page, mm, in collapse_huge_page()
2456 down_write(&mm->mmap_sem); in collapse_huge_page()
2457 if (unlikely(khugepaged_test_exit(mm))) in collapse_huge_page()
2460 vma = find_vma(mm, address); in collapse_huge_page()
2469 pmd = mm_find_pmd(mm, address); in collapse_huge_page()
2476 pte_ptl = pte_lockptr(mm, pmd); in collapse_huge_page()
2480 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in collapse_huge_page()
2481 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ in collapse_huge_page()
2490 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in collapse_huge_page()
2505 pmd_populate(mm, pmd, pmd_pgtable(_pmd)); in collapse_huge_page()
2537 pgtable_trans_huge_deposit(mm, pmd, pgtable); in collapse_huge_page()
2538 set_pmd_at(mm, address, pmd, _pmd); in collapse_huge_page()
2546 up_write(&mm->mmap_sem); in collapse_huge_page()
2554 static int khugepaged_scan_pmd(struct mm_struct *mm, in khugepaged_scan_pmd() argument
2569 pmd = mm_find_pmd(mm, address); in khugepaged_scan_pmd()
2574 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in khugepaged_scan_pmd()
2616 collapse_huge_page(mm, address, hpage, vma, node); in khugepaged_scan_pmd()
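
khugepaged_scan_pmd() (2554-2616) is the read-side walk that decides whether a huge-page-aligned range is worth collapsing: locate the pmd, map and lock the pte page with pte_offset_map_lock(), inspect all HPAGE_PMD_NR entries, and only then call collapse_huge_page(). A skeletal sketch of that structure is below; pte_suitable() is a hypothetical placeholder for the real per-pte checks, and the NUMA node selection is reduced to its initial value.

/* Skeleton of the scan structure at 2554-2616. */
static int sketch_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			   unsigned long address, struct page **hpage)
{
	unsigned long _address;
	spinlock_t *ptl;
	pte_t *pte, *_pte;
	int ret = 0, node = NUMA_NO_NODE;
	pmd_t *pmd;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return 0;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		if (!pte_suitable(*_pte))	/* hypothetical per-pte check */
			goto out_unmap;
		/* The real scan also records the pages' NUMA nodes here. */
	}
	ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret)
		/* Drops mmap_sem and re-takes it for write; see the hits at 2343 and 2456. */
		collapse_huge_page(mm, address, hpage, vma, node);
	return ret;
}
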
2624 struct mm_struct *mm = mm_slot->mm; in collect_mm_slot() local
2628 if (khugepaged_test_exit(mm)) { in collect_mm_slot()
2641 mmdrop(mm); in collect_mm_slot()
2651 struct mm_struct *mm; in khugepaged_scan_mm_slot() local
2668 mm = mm_slot->mm; in khugepaged_scan_mm_slot()
2669 down_read(&mm->mmap_sem); in khugepaged_scan_mm_slot()
2670 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2673 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2680 if (unlikely(khugepaged_test_exit(mm))) { in khugepaged_scan_mm_slot()
2702 if (unlikely(khugepaged_test_exit(mm))) in khugepaged_scan_mm_slot()
2708 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2722 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ in khugepaged_scan_mm_slot()
2731 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
2841 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd() local
2849 pgtable = pgtable_trans_huge_withdraw(mm, pmd); in __split_huge_zero_page_pmd()
2850 pmd_populate(mm, &_pmd, pgtable); in __split_huge_zero_page_pmd()
2858 set_pte_at(mm, haddr, pte, entry); in __split_huge_zero_page_pmd()
2862 pmd_populate(mm, pmd, pgtable); in __split_huge_zero_page_pmd()
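
Both split paths, __split_huge_page_map() at 1790-1854 and __split_huge_zero_page_pmd() at 2841-2862, rely on the pgtable that was deposited when the huge PMD was installed: withdraw it, fill it with HPAGE_PMD_NR regular ptes through a temporary _pmd, then make it visible by repopulating the real PMD. A compressed sketch of the zero-page variant follows; the initial pmdp_clear_flush() and the pte construction are assumptions, only the withdraw/populate/set_pte_at ordering comes from the listing.

/* Compressed sketch of the split-back-to-ptes pattern at 2841-2862; the
 * caller is assumed to hold the pmd lock. */
static void sketch_split_zero_pmd(struct vm_area_struct *vma, pmd_t *pmd,
				  unsigned long haddr)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_clear_flush(vma, haddr, pmd);	/* assumed: retire the huge mapping first */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);	/* the pte page deposited at fault time */
	pmd_populate(mm, &_pmd, pgtable);		/* build the ptes through a temporary pmd */

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;

		entry = pte_mkspecial(pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot));
		pte = pte_offset_map(&_pmd, haddr);
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb();			/* ptes must be visible before the pmd points at them */
	pmd_populate(mm, pmd, pgtable);
}
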
2871 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_pmd() local
2881 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2882 ptl = pmd_lock(mm, pmd); in __split_huge_page_pmd()
2885 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2891 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2898 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in __split_huge_page_pmd()
2913 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, in split_huge_page_pmd_mm() argument
2918 vma = find_vma(mm, address); in split_huge_page_pmd_mm()
2923 static void split_huge_page_address(struct mm_struct *mm, in split_huge_page_address() argument
2932 pgd = pgd_offset(mm, address); in split_huge_page_address()
2947 split_huge_page_pmd_mm(mm, address, pmd); in split_huge_page_address()