Lines Matching refs:mm
183 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count, in mm_trace_rss_stat() argument
190 trace_rss_stat(mm, member, count); in mm_trace_rss_stat()
196 void sync_mm_rss(struct mm_struct *mm) in sync_mm_rss() argument
202 add_mm_counter(mm, i, current->rss_stat.count[i]); in sync_mm_rss()
209 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) in add_mm_counter_fast() argument
213 if (likely(task->mm == mm)) in add_mm_counter_fast()
216 add_mm_counter(mm, member, val); in add_mm_counter_fast()
218 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) argument
219 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) argument
228 sync_mm_rss(task->mm); in check_sync_rss_stat()
232 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) argument
233 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) argument
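The helpers above batch RSS accounting in a per-task cache and fold it into the mm-wide counters on demand: sync_mm_rss() flushes the cache, and add_mm_counter_fast() takes the cheap path only while current->mm == mm, syncing once enough events pile up. A rough userspace analogue of that batching pattern, with hypothetical names and none of the kernel's task/mm checks, might look like:

#include <stdatomic.h>
#include <stdio.h>

#define SYNC_THRESHOLD 64                 /* cf. TASK_RSS_EVENTS_THRESH */

static atomic_long shared_rss;            /* the mm-wide counter        */
static _Thread_local long cached_delta;   /* per-task fast-path cache   */
static _Thread_local int  cached_events;

static void rss_sync(void)                /* cf. sync_mm_rss()          */
{
        atomic_fetch_add(&shared_rss, cached_delta);
        cached_delta = 0;
        cached_events = 0;
}

static void rss_add_fast(long val)        /* cf. add_mm_counter_fast()  */
{
        cached_delta += val;
        if (++cached_events >= SYNC_THRESHOLD)
                rss_sync();
}

int main(void)
{
        for (int i = 0; i < 1000; i++)
                rss_add_fast(1);
        rss_sync();                       /* flush the remainder        */
        printf("rss = %ld\n", (long)atomic_load(&shared_rss));
        return 0;
}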
256 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in free_pte_range()
261 mm_dec_nr_ptes(tlb->mm); in free_pte_range()
295 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
329 mm_dec_nr_puds(tlb->mm); in free_pud_range()
421 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
469 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) in __pte_alloc() argument
472 pgtable_t new = pte_alloc_one(mm); in __pte_alloc()
491 ptl = pmd_lock(mm, pmd); in __pte_alloc()
493 mm_inc_nr_ptes(mm); in __pte_alloc()
494 pmd_populate(mm, pmd, new); in __pte_alloc()
499 pte_free(mm, new); in __pte_alloc()
527 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) in add_mm_rss_vec() argument
531 if (current->mm == mm) in add_mm_rss_vec()
532 sync_mm_rss(mm); in add_mm_rss_vec()
535 add_mm_counter(mm, i, rss[i]); in add_mm_rss_vec()
1283 struct mm_struct *mm = tlb->mm; in zap_pte_range() local
1294 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1296 flush_tlb_batched_pending(mm); in zap_pte_range()
1320 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1363 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1386 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1389 add_mm_rss_vec(mm, rss); in zap_pte_range()
1436 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in zap_pmd_range()
1475 mmap_assert_locked(tlb->mm); in zap_pud_range()
1680 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) in walk_to_pmd() argument
1687 pgd = pgd_offset(mm, addr); in walk_to_pmd()
1688 p4d = p4d_alloc(mm, pgd, addr); in walk_to_pmd()
1691 pud = pud_alloc(mm, p4d, addr); in walk_to_pmd()
1694 pmd = pmd_alloc(mm, pud, addr); in walk_to_pmd()
1702 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
1705 pmd_t *pmd = walk_to_pmd(mm, addr); in __get_locked_pte()
1709 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
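walk_to_pmd() descends pgd -> p4d -> pud -> pmd, allocating intermediate tables on demand, and __get_locked_pte() then maps and locks the PTE level. A purely illustrative userspace sketch of that descend-or-allocate shape (two levels instead of five, no locking, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

#define TOP_BITS  10
#define LEAF_BITS 10
#define TOP_SIZE  (1UL << TOP_BITS)
#define LEAF_SIZE (1UL << LEAF_BITS)

struct table { unsigned long *leaf[TOP_SIZE]; };

/* Return the slot for addr, allocating the leaf table if it is missing. */
static unsigned long *walk_to_slot(struct table *t, unsigned long addr)
{
        unsigned long top = (addr >> LEAF_BITS) & (TOP_SIZE - 1);
        unsigned long off = addr & (LEAF_SIZE - 1);

        if (!t->leaf[top]) {
                t->leaf[top] = calloc(LEAF_SIZE, sizeof(unsigned long));
                if (!t->leaf[top])
                        return NULL;            /* cf. pmd_alloc() failing */
        }
        return &t->leaf[top][off];
}

int main(void)
{
        struct table t = { { NULL } };
        unsigned long *slot = walk_to_slot(&t, 123456);

        if (slot) {
                *slot = 0xabcd;                 /* cf. set_pte_at()        */
                printf("slot = %#lx\n", *slot);
        }
        return 0;
}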
1720 static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte, in insert_page_into_pte_locked() argument
1727 inc_mm_counter_fast(mm, mm_counter_file(page)); in insert_page_into_pte_locked()
1729 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page_into_pte_locked()
1743 struct mm_struct *mm = vma->vm_mm; in insert_page() local
1752 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1755 retval = insert_page_into_pte_locked(mm, pte, addr, page, prot); in insert_page()
1762 static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte, in insert_page_in_batch_locked() argument
1772 return insert_page_into_pte_locked(mm, pte, addr, page, prot); in insert_page_in_batch_locked()
1784 struct mm_struct *const mm = vma->vm_mm; in insert_pages() local
1791 pmd = walk_to_pmd(mm, addr); in insert_pages()
1800 if (pte_alloc(mm, pmd)) in insert_pages()
1807 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); in insert_pages()
1809 int err = insert_page_in_batch_locked(mm, pte, in insert_pages()
2007 struct mm_struct *mm = vma->vm_mm; in insert_pfn() local
2011 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
2050 set_pte_at(mm, addr, pte, entry); in insert_pfn()
2251 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
2259 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
2269 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
2277 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, in remap_pmd_range() argument
2286 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
2292 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
2300 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, in remap_pud_range() argument
2309 pud = pud_alloc(mm, p4d, addr); in remap_pud_range()
2314 err = remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
2322 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, in remap_p4d_range() argument
2331 p4d = p4d_alloc(mm, pgd, addr); in remap_p4d_range()
2336 err = remap_pud_range(mm, p4d, addr, next, in remap_p4d_range()
2362 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range() local
2401 pgd = pgd_offset(mm, addr); in remap_pfn_range()
2405 err = remap_p4d_range(mm, pgd, addr, next, in remap_pfn_range()
2467 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
2477 pte = (mm == &init_mm) ? in apply_to_pte_range()
2479 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2483 pte = (mm == &init_mm) ? in apply_to_pte_range()
2485 pte_offset_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
2505 if (mm != &init_mm) in apply_to_pte_range()
2510 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, in apply_to_pmd_range() argument
2522 pmd = pmd_alloc_track(mm, pud, addr, mask); in apply_to_pmd_range()
2531 err = apply_to_pte_range(mm, pmd, addr, next, fn, data, in apply_to_pmd_range()
2540 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, in apply_to_pud_range() argument
2550 pud = pud_alloc_track(mm, p4d, addr, mask); in apply_to_pud_range()
2559 err = apply_to_pmd_range(mm, pud, addr, next, fn, data, in apply_to_pud_range()
2568 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, in apply_to_p4d_range() argument
2578 p4d = p4d_alloc_track(mm, pgd, addr, mask); in apply_to_p4d_range()
2587 err = apply_to_pud_range(mm, p4d, addr, next, fn, data, in apply_to_p4d_range()
2596 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, in __apply_to_page_range() argument
2609 pgd = pgd_offset(mm, addr); in __apply_to_page_range()
2614 err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask); in __apply_to_page_range()
2629 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
2632 return __apply_to_page_range(mm, addr, size, fn, data, true); in apply_to_page_range()
2862 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_existing_page_range() argument
2865 return __apply_to_page_range(mm, addr, size, fn, data, false); in apply_to_existing_page_range()
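apply_to_page_range() and apply_to_existing_page_range() share __apply_to_page_range(): both walk the tables over [addr, addr + size) and invoke a caller-supplied callback on each PTE with an opaque data cookie, and the create flag decides whether missing tables are allocated or quietly skipped. A much-simplified userspace sketch of that calling convention (illustrative names, a single table level):

#include <stdbool.h>
#include <stdlib.h>

#define NSLOTS 16

typedef int (*slot_fn_t)(unsigned long *slot, unsigned long idx, void *data);

struct space { unsigned long *slots[NSLOTS]; };  /* lazily allocated entries */

static int apply_to_range(struct space *s, unsigned long start,
                          unsigned long n, slot_fn_t fn, void *data,
                          bool create)
{
        for (unsigned long i = start; i < start + n; i++) {
                unsigned long idx = i % NSLOTS;

                if (!s->slots[idx]) {
                        if (!create)
                                continue;       /* existing-only: skip holes */
                        s->slots[idx] = calloc(1, sizeof(unsigned long));
                        if (!s->slots[idx])
                                return -1;      /* cf. -ENOMEM               */
                }
                int err = fn(s->slots[idx], i, data);
                if (err)
                        return err;             /* stop on the first error   */
        }
        return 0;
}

static int set_value(unsigned long *slot, unsigned long idx, void *data)
{
        *slot = *(unsigned long *)data;
        return 0;
}

int main(void)
{
        struct space s = { { NULL } };
        unsigned long value = 42;

        return apply_to_range(&s, 0, NSLOTS, set_value, &value, true) ? 1 : 0;
}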
2909 struct mm_struct *mm = vma->vm_mm; in cow_user_page() local
2933 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
2961 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in cow_user_page()
3145 struct mm_struct *mm = vma->vm_mm; in wp_page_copy() local
3182 if (mem_cgroup_charge(new_page, mm, GFP_KERNEL)) in wp_page_copy()
3188 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, in wp_page_copy()
3203 dec_mm_counter_fast(mm, in wp_page_copy()
3205 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
3208 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
3228 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
4863 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault() local
4868 pgd = pgd_offset(mm, address); in __handle_mm_fault()
4869 p4d = p4d_alloc(mm, pgd, address); in __handle_mm_fault()
4873 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
4900 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
4923 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
5024 static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm, in ___handle_speculative_fault() argument
5128 pgd = pgd_offset(mm, address); in ___handle_speculative_fault()
5246 vm_fault_t __handle_speculative_fault(struct mm_struct *mm, in __handle_speculative_fault() argument
5255 *vma = get_vma(mm, address); in __handle_speculative_fault()
5259 ret = ___handle_speculative_fault(mm, address, flags, *vma); in __handle_speculative_fault()
5351 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __p4d_alloc() argument
5353 p4d_t *new = p4d_alloc_one(mm, address); in __p4d_alloc()
5359 spin_lock(&mm->page_table_lock); in __p4d_alloc()
5361 p4d_free(mm, new); in __p4d_alloc()
5363 pgd_populate(mm, pgd, new); in __p4d_alloc()
5364 spin_unlock(&mm->page_table_lock); in __p4d_alloc()
5374 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) in __pud_alloc() argument
5376 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
5382 spin_lock(&mm->page_table_lock); in __pud_alloc()
5384 mm_inc_nr_puds(mm); in __pud_alloc()
5385 p4d_populate(mm, p4d, new); in __pud_alloc()
5387 pud_free(mm, new); in __pud_alloc()
5388 spin_unlock(&mm->page_table_lock); in __pud_alloc()
5398 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
5401 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
5407 ptl = pud_lock(mm, pud); in __pmd_alloc()
5409 mm_inc_nr_pmds(mm); in __pmd_alloc()
5410 pud_populate(mm, pud, new); in __pmd_alloc()
5412 pmd_free(mm, new); in __pmd_alloc()
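__p4d_alloc(), __pud_alloc() and __pmd_alloc() all follow the same pattern: allocate the new table with no lock held, then take the relevant lock and either populate the parent entry or, if another thread raced in first, free the freshly allocated table. A bare-bones userspace rendering of that pattern (single lower level, pthread mutex, hypothetical names):

#include <pthread.h>
#include <stdlib.h>

#define LEAF_SIZE 512

struct dir {
        pthread_mutex_t lock;           /* cf. mm->page_table_lock      */
        unsigned long *leaf;            /* lazily allocated lower table */
};

static int dir_alloc_leaf(struct dir *d)
{
        unsigned long *new = calloc(LEAF_SIZE, sizeof(*new));

        if (!new)
                return -1;                      /* cf. -ENOMEM            */

        pthread_mutex_lock(&d->lock);
        if (!d->leaf)
                d->leaf = new;                  /* cf. pud_populate()     */
        else
                free(new);                      /* lost the race: discard */
        pthread_mutex_unlock(&d->lock);
        return 0;
}

int main(void)
{
        struct dir d = { PTHREAD_MUTEX_INITIALIZER, NULL };

        if (dir_alloc_leaf(&d))
                return 1;
        free(d.leaf);
        return 0;
}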
5418 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address, in follow_invalidate_pte() argument
5428 pgd = pgd_offset(mm, address); in follow_invalidate_pte()
5449 NULL, mm, address & PMD_MASK, in follow_invalidate_pte()
5453 *ptlp = pmd_lock(mm, pmd); in follow_invalidate_pte()
5467 mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm, in follow_invalidate_pte()
5472 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in follow_invalidate_pte()
5506 int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
5509 return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp); in follow_pte()
5604 int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
5611 if (mmap_read_lock_killable(mm)) in __access_remote_vm()
5620 ret = get_user_pages_remote(mm, addr, 1, in __access_remote_vm()
5630 vma = find_vma(mm, addr); in __access_remote_vm()
5662 mmap_read_unlock(mm); in __access_remote_vm()
5679 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
5682 return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags); in access_remote_vm()
5693 struct mm_struct *mm; in access_process_vm() local
5696 mm = get_task_mm(tsk); in access_process_vm()
5697 if (!mm) in access_process_vm()
5700 ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags); in access_process_vm()
5702 mmput(mm); in access_process_vm()
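__access_remote_vm() is the engine behind, for example, /proc/<pid>/mem and ptrace reads of another process's memory, and access_process_vm() wraps it with get_task_mm()/mmput() as shown above. From userspace, a comparable remote read is available through the process_vm_readv() syscall; a minimal sketch, where the target pid and remote address are placeholders:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/uio.h>

int main(void)
{
        pid_t pid = 1234;                       /* hypothetical target pid */
        void *remote_addr = (void *)0x400000;   /* hypothetical address    */
        char buf[64];

        struct iovec local  = { .iov_base = buf,         .iov_len = sizeof(buf) };
        struct iovec remote = { .iov_base = remote_addr, .iov_len = sizeof(buf) };

        ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
        if (n < 0) {
                perror("process_vm_readv");
                return 1;
        }
        printf("read %zd bytes\n", n);
        return 0;
}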
5713 struct mm_struct *mm = current->mm; in print_vma_addr() local
5719 if (!mmap_read_trylock(mm)) in print_vma_addr()
5722 vma = find_vma(mm, ip); in print_vma_addr()
5738 mmap_read_unlock(mm); in print_vma_addr()
5756 if (current->mm) in __might_fault()
5757 might_lock_read(&current->mm->mmap_lock); in __might_fault()