/mm/
mmu_notifier.c
    195  interval_sub->mm->notifier_subscriptions;  in mmu_interval_read_begin()
    268  struct mm_struct *mm)  in mn_itree_release() argument
    273  .mm = mm,  in mn_itree_release()
    306  struct mm_struct *mm)  in mn_hlist_release() argument
    325  subscription->ops->release(subscription, mm);  in mn_hlist_release()
    354  void __mmu_notifier_release(struct mm_struct *mm)  in __mmu_notifier_release() argument
    357  mm->notifier_subscriptions;  in __mmu_notifier_release()
    360  mn_itree_release(subscriptions, mm);  in __mmu_notifier_release()
    363  mn_hlist_release(subscriptions, mm);  in __mmu_notifier_release()
    371  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,  in __mmu_notifier_clear_flush_young() argument
    [all …]

debug.c
    217  void dump_mm(const struct mm_struct *mm)  in dump_mm() argument
    247  mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,  in dump_mm()
    249  mm->get_unmapped_area,  in dump_mm()
    251  mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,  in dump_mm()
    252  mm->pgd, atomic_read(&mm->mm_users),  in dump_mm()
    253  atomic_read(&mm->mm_count),  in dump_mm()
    254  mm_pgtables_bytes(mm),  in dump_mm()
    255  mm->map_count,  in dump_mm()
    256  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,  in dump_mm()
    257  (u64)atomic64_read(&mm->pinned_vm),  in dump_mm()
    [all …]

mmap.c
    80  static void unmap_region(struct mm_struct *mm,
    213  struct mm_struct *mm = current->mm;  in SYSCALL_DEFINE1() local
    220  if (mmap_write_lock_killable(mm))  in SYSCALL_DEFINE1()
    223  origbrk = mm->brk;  in SYSCALL_DEFINE1()
    232  min_brk = mm->start_brk;  in SYSCALL_DEFINE1()
    234  min_brk = mm->end_data;  in SYSCALL_DEFINE1()
    236  min_brk = mm->start_brk;  in SYSCALL_DEFINE1()
    247  if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,  in SYSCALL_DEFINE1()
    248  mm->end_data, mm->start_data))  in SYSCALL_DEFINE1()
    252  oldbrk = PAGE_ALIGN(mm->brk);  in SYSCALL_DEFINE1()
    [all …]

debug_vm_pgtable.c
    89  static void __init pte_advanced_tests(struct mm_struct *mm,  in pte_advanced_tests() argument
    104  set_pte_at(mm, vaddr, ptep, pte);  in pte_advanced_tests()
    105  ptep_set_wrprotect(mm, vaddr, ptep);  in pte_advanced_tests()
    108  ptep_get_and_clear(mm, vaddr, ptep);  in pte_advanced_tests()
    115  set_pte_at(mm, vaddr, ptep, pte);  in pte_advanced_tests()
    121  ptep_get_and_clear_full(mm, vaddr, ptep, 1);  in pte_advanced_tests()
    127  set_pte_at(mm, vaddr, ptep, pte);  in pte_advanced_tests()
    132  ptep_get_and_clear_full(mm, vaddr, ptep, 1);  in pte_advanced_tests()
    186  static void __init pmd_advanced_tests(struct mm_struct *mm,  in pmd_advanced_tests() argument
    200  pgtable_trans_huge_deposit(mm, pmdp, pgtable);  in pmd_advanced_tests()
    [all …]

oom_kill.c
    148  if (likely(t->mm))  in find_lock_task_mm()
    225  test_bit(MMF_OOM_SKIP, &p->mm->flags) ||  in oom_badness()
    235  points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +  in oom_badness()
    236  mm_pgtables_bytes(p->mm) / PAGE_SIZE;  in oom_badness()
    446  task->tgid, task->mm->total_vm, get_mm_rss(task->mm),  in dump_task()
    447  mm_pgtables_bytes(task->mm),  in dump_task()
    448  get_mm_counter(task->mm, MM_SWAPENTS),  in dump_task()
    532  bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)  in process_shares_mm() argument
    537  struct mm_struct *t_mm = READ_ONCE(t->mm);  in process_shares_mm()
    539  return t_mm == mm;  in process_shares_mm()
    [all …]

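The oom_badness() hits above reduce a task's OOM score to three per-mm quantities. A minimal sketch of just that sum, using only helpers that appear in the listed lines (badness_pages() is a hypothetical name, not a function in oom_kill.c):

    static unsigned long badness_pages(struct mm_struct *mm)
    {
        /*
         * Resident pages plus swap entries plus page-table overhead,
         * all expressed in pages, as on lines 235-236 above.
         */
        return get_mm_rss(mm) + get_mm_counter(mm, MM_SWAPENTS) +
               mm_pgtables_bytes(mm) / PAGE_SIZE;
    }

The real oom_badness() additionally skips tasks flagged MMF_OOM_SKIP (line 225) and folds in oom_score_adj, which these hits do not show.
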
khugepaged.c
    97  struct mm_struct *mm;  member
    417  static struct mm_slot *get_mm_slot(struct mm_struct *mm)  in get_mm_slot() argument
    421  hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)  in get_mm_slot()
    422  if (mm == mm_slot->mm)  in get_mm_slot()
    428  static void insert_to_mm_slots_hash(struct mm_struct *mm,  in insert_to_mm_slots_hash() argument
    431  mm_slot->mm = mm;  in insert_to_mm_slots_hash()
    432  hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);  in insert_to_mm_slots_hash()
    435  static inline int khugepaged_test_exit(struct mm_struct *mm)  in khugepaged_test_exit() argument
    437  return atomic_read(&mm->mm_users) == 0;  in khugepaged_test_exit()
    474  int __khugepaged_enter(struct mm_struct *mm)  in __khugepaged_enter() argument
    [all …]

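get_mm_slot() and insert_to_mm_slots_hash() in the hits above use the generic <linux/hashtable.h> helpers, keyed by the mm pointer itself. A condensed sketch of that pattern; the bucket count is an assumption (check khugepaged.c for the real MM_SLOTS_HASH_BITS), and allocation and locking of the slots are omitted:

    #include <linux/hashtable.h>
    #include <linux/mm_types.h>

    #define MM_SLOTS_HASH_BITS 10                   /* assumed value */
    static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

    struct mm_slot {
        struct hlist_node hash;                     /* link in mm_slots_hash */
        struct mm_struct *mm;                       /* the mm this slot tracks */
    };

    static struct mm_slot *get_mm_slot(struct mm_struct *mm)
    {
        struct mm_slot *mm_slot;

        /* The mm pointer doubles as the hash key. */
        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
            if (mm == mm_slot->mm)
                return mm_slot;
        return NULL;
    }

    static void insert_to_mm_slots_hash(struct mm_struct *mm, struct mm_slot *mm_slot)
    {
        mm_slot->mm = mm;
        hash_add(mm_slots_hash, &mm_slot->hash, (unsigned long)mm);
    }

ksm.c, further down this listing, keeps an equivalent per-mm slot hash of its own.
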
madvise.c
    73  struct mm_struct *mm = vma->vm_mm;  in madvise_behavior() local
    139  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,  in madvise_behavior()
    150  if (unlikely(mm->map_count >= sysctl_max_map_count)) {  in madvise_behavior()
    154  error = __split_vma(mm, vma, start, 1);  in madvise_behavior()
    160  if (unlikely(mm->map_count >= sysctl_max_map_count)) {  in madvise_behavior()
    164  error = __split_vma(mm, vma, end, 0);  in madvise_behavior()
    266  struct mm_struct *mm = vma->vm_mm;  in madvise_willneed() local
    303  mmap_read_unlock(mm);  in madvise_willneed()
    306  mmap_read_lock(mm);  in madvise_willneed()
    318  struct mm_struct *mm = tlb->mm;  in madvise_cold_or_pageout_pte_range() local
    [all …]

mremap.c
    33  static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)  in get_old_pud() argument
    39  pgd = pgd_offset(mm, addr);  in get_old_pud()
    54  static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)  in get_old_pmd() argument
    59  pud = get_old_pud(mm, addr);  in get_old_pmd()
    70  static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pud() argument
    76  pgd = pgd_offset(mm, addr);  in alloc_new_pud()
    77  p4d = p4d_alloc(mm, pgd, addr);  in alloc_new_pud()
    81  return pud_alloc(mm, p4d, addr);  in alloc_new_pud()
    84  static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,  in alloc_new_pmd() argument
    90  pud = alloc_new_pud(mm, vma, addr);  in alloc_new_pmd()
    [all …]

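The get_old_*()/alloc_new_*() pairs above descend the page-table hierarchy one level per call: the lookup side bails out on empty levels, while the allocation side (p4d_alloc(), pud_alloc()) creates them on demand. A trimmed sketch of the lookup side only; lookup_pud() is a hypothetical name, and error handling beyond the none/bad checks is dropped:

    static pud_t *lookup_pud(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset(mm, addr);              /* top level, indexed by addr */
        if (pgd_none_or_clear_bad(pgd))
            return NULL;

        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
            return NULL;

        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
            return NULL;

        return pud;
    }
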
util.c
    278  void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,  in __vma_link_list() argument
    288  next = mm->mmap;  in __vma_link_list()
    289  mm->mmap = vma;  in __vma_link_list()
    296  void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)  in __vma_unlink_list() argument
    305  mm->mmap = next;  in __vma_unlink_list()
    371  unsigned long arch_randomize_brk(struct mm_struct *mm)  in arch_randomize_brk() argument
    375  return randomize_page(mm->brk, SZ_32M);  in arch_randomize_brk()
    377  return randomize_page(mm->brk, SZ_1G);  in arch_randomize_brk()
    434  void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)  in arch_pick_mmap_layout() argument
    442  mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;  in arch_pick_mmap_layout()
    [all …]

nommu.c
    102  vma = find_vma(current->mm, (unsigned long)objp);  in kobjsize()
    175  mmap_write_lock(current->mm);  in __vmalloc_user_flags()
    176  vma = find_vma(current->mm, (unsigned long)ret);  in __vmalloc_user_flags()
    179  mmap_write_unlock(current->mm);  in __vmalloc_user_flags()
    393  struct mm_struct *mm = current->mm;  in SYSCALL_DEFINE1() local
    395  if (brk < mm->start_brk || brk > mm->context.end_brk)  in SYSCALL_DEFINE1()
    396  return mm->brk;  in SYSCALL_DEFINE1()
    398  if (mm->brk == brk)  in SYSCALL_DEFINE1()
    399  return mm->brk;  in SYSCALL_DEFINE1()
    404  if (brk <= mm->brk) {  in SYSCALL_DEFINE1()
    [all …]

ksm.c
    124  struct mm_struct *mm;  member
    203  struct mm_struct *mm;  member
    391  rmap_item->mm = NULL; /* debug safety */  in free_rmap_item()
    424  static struct mm_slot *get_mm_slot(struct mm_struct *mm)  in get_mm_slot() argument
    428  hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)  in get_mm_slot()
    429  if (slot->mm == mm)  in get_mm_slot()
    435  static void insert_to_mm_slots_hash(struct mm_struct *mm,  in insert_to_mm_slots_hash() argument
    438  mm_slot->mm = mm;  in insert_to_mm_slots_hash()
    439  hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);  in insert_to_mm_slots_hash()
    450  static inline bool ksm_test_exit(struct mm_struct *mm)  in ksm_test_exit() argument
    [all …]

gup.c
    429  struct mm_struct *mm = vma->vm_mm;  in follow_page_pte() local
    455  ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in follow_page_pte()
    472  migration_entry_wait(mm, pmd, address);  in follow_page_pte()
    595  struct mm_struct *mm = vma->vm_mm;  in follow_pmd_mask() local
    626  pmd_migration_entry_wait(mm, pmd);  in follow_pmd_mask()
    637  ptl = pmd_lock(mm, pmd);  in follow_pmd_mask()
    650  ptl = pmd_lock(mm, pmd);  in follow_pmd_mask()
    659  pmd_migration_entry_wait(mm, pmd);  in follow_pmd_mask()
    690  ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;  in follow_pmd_mask()
    710  struct mm_struct *mm = vma->vm_mm;  in follow_pud_mask() local
    [all …]

vmacache.c
    30  static inline bool vmacache_valid_mm(struct mm_struct *mm)  in vmacache_valid_mm() argument
    32  return current->mm == mm && !(current->flags & PF_KTHREAD);  in vmacache_valid_mm()
    41  static bool vmacache_valid(struct mm_struct *mm)  in vmacache_valid() argument
    45  if (!vmacache_valid_mm(mm))  in vmacache_valid()
    49  if (mm->vmacache_seqnum != curr->vmacache.seqnum) {  in vmacache_valid()
    54  curr->vmacache.seqnum = mm->vmacache_seqnum;  in vmacache_valid()
    61  struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)  in vmacache_find() argument
    68  if (!vmacache_valid(mm))  in vmacache_find()
    76  if (WARN_ON_ONCE(vma->vm_mm != mm))  in vmacache_find()
    92  struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,  in vmacache_find_exact() argument
    [all …]

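vmacache_valid() above gates every lookup on two checks: the per-task cache only ever serves current's own mm, and a per-mm sequence number invalidates stale entries wholesale. A compressed sketch combining the two helpers from the hits (the real code also flushes the cached VMA array when the seqnum has moved):

    static bool vmacache_valid(struct mm_struct *mm)
    {
        struct task_struct *curr = current;

        /* The cache lives in the task and is only meaningful for its own mm. */
        if (curr->mm != mm || (curr->flags & PF_KTHREAD))
            return false;

        /* A seqnum bump anywhere in the mm invalidates the whole cache. */
        if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
            curr->vmacache.seqnum = mm->vmacache_seqnum;
            return false;
        }
        return true;
    }
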
memory.c
    183  void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,  in mm_trace_rss_stat() argument
    190  trace_rss_stat(mm, member, count);  in mm_trace_rss_stat()
    196  void sync_mm_rss(struct mm_struct *mm)  in sync_mm_rss() argument
    202  add_mm_counter(mm, i, current->rss_stat.count[i]);  in sync_mm_rss()
    209  static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)  in add_mm_counter_fast() argument
    213  if (likely(task->mm == mm))  in add_mm_counter_fast()
    216  add_mm_counter(mm, member, val);  in add_mm_counter_fast()
    218  #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)  argument
    219  #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)  argument
    228  sync_mm_rss(task->mm);  in check_sync_rss_stat()
    [all …]

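add_mm_counter_fast() in the hits above skips the shared atomic counters when the update targets the current task's own mm, and sync_mm_rss() later folds the task-local cache back in. A sketch of the fast path, assuming the per-task rss_stat cache this kernel generation keeps in task_struct:

    static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
    {
        struct task_struct *task = current;

        if (likely(task->mm == mm))
            task->rss_stat.count[member] += val;    /* cheap, task-local */
        else
            add_mm_counter(mm, member, val);        /* atomic, shared mm counter */
    }

The cached deltas stay bounded because check_sync_rss_stat() (line 228) periodically calls sync_mm_rss() to flush them into the mm.
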
mlock.c
    537  struct mm_struct *mm = vma->vm_mm;  in mlock_fixup() local
    545  is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||  in mlock_fixup()
    551  *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,  in mlock_fixup()
    560  ret = split_vma(mm, vma, start, 1);  in mlock_fixup()
    566  ret = split_vma(mm, vma, end, 0);  in mlock_fixup()
    580  mm->locked_vm += nr_pages;  in mlock_fixup()
    613  vma = find_vma(current->mm, start);  in apply_vma_lock_flags()
    655  static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,  in count_mm_mlocked_page_nr() argument
    661  if (mm == NULL)  in count_mm_mlocked_page_nr()
    662  mm = current->mm;  in count_mm_mlocked_page_nr()
    [all …]

rmap.c
    187  struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare() local
    209  spin_lock(&mm->page_table_lock);  in __anon_vma_prepare()
    217  spin_unlock(&mm->page_table_lock);  in __anon_vma_prepare()
    636  static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)  in set_tlb_ubc_flush_pending() argument
    640  arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);  in set_tlb_ubc_flush_pending()
    648  mm->tlb_flush_batched = true;  in set_tlb_ubc_flush_pending()
    663  static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)  in should_defer_flush() argument
    671  if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)  in should_defer_flush()
    693  void flush_tlb_batched_pending(struct mm_struct *mm)  in flush_tlb_batched_pending() argument
    695  if (data_race(mm->tlb_flush_batched)) {  in flush_tlb_batched_pending()
    [all …]

mprotect.c
    328  struct mm_struct *mm = vma->vm_mm;  in change_protection_range() local
    335  pgd = pgd_offset(mm, addr);  in change_protection_range()
    337  inc_tlb_flush_pending(mm);  in change_protection_range()
    349  dec_tlb_flush_pending(mm);  in change_protection_range()
    402  struct mm_struct *mm = vma->vm_mm;  in mprotect_fixup() local
    425  error = walk_page_range(current->mm, start, end,  in mprotect_fixup()
    439  if (!may_expand_vm(mm, newflags, nrpages) &&  in mprotect_fixup()
    440  may_expand_vm(mm, oldflags, nrpages))  in mprotect_fixup()
    445  if (security_vm_enough_memory_mm(mm, charged))  in mprotect_fixup()
    455  *pprev = vma_merge(mm, *pprev, start, end, newflags,  in mprotect_fixup()
    [all …]

pgtable-generic.c
    93  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush() local
    95  pte = ptep_get_and_clear(mm, address, ptep);  in ptep_clear_flush()
    96  if (pte_accessible(mm, pte))  in ptep_clear_flush()
    161  void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,  in pgtable_trans_huge_deposit() argument
    164  assert_spin_locked(pmd_lockptr(mm, pmdp));  in pgtable_trans_huge_deposit()
    167  if (!pmd_huge_pte(mm, pmdp))  in pgtable_trans_huge_deposit()
    170  list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);  in pgtable_trans_huge_deposit()
    171  pmd_huge_pte(mm, pmdp) = pgtable;  in pgtable_trans_huge_deposit()
    177  pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)  in pgtable_trans_huge_withdraw() argument
    181  assert_spin_locked(pmd_lockptr(mm, pmdp));  in pgtable_trans_huge_withdraw()
    [all …]

migrate.c
    290  void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,  in __migration_entry_wait() argument
    324  void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,  in migration_entry_wait() argument
    327  spinlock_t *ptl = pte_lockptr(mm, pmd);  in migration_entry_wait()
    329  __migration_entry_wait(mm, ptep, ptl);  in migration_entry_wait()
    333  struct mm_struct *mm, pte_t *pte)  in migration_entry_wait_huge() argument
    335  spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);  in migration_entry_wait_huge()
    336  __migration_entry_wait(mm, pte, ptl);  in migration_entry_wait_huge()
    340  void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)  in pmd_migration_entry_wait() argument
    345  ptl = pmd_lock(mm, pmd);  in pmd_migration_entry_wait()
    1584  static int do_move_pages_to_node(struct mm_struct *mm,  in do_move_pages_to_node() argument
    [all …]

pagewalk.c
    53  pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in walk_pte_range()
    212  pgd = pgd_offset(walk->mm, addr);  in walk_pgd_range()
    259  pte = huge_pte_offset(walk->mm, addr & hmask, sz);  in walk_hugetlb_range()
    379  int walk_page_range(struct mm_struct *mm, unsigned long start,  in walk_page_range() argument
    388  .mm = mm,  in walk_page_range()
    395  if (!walk.mm)  in walk_page_range()
    398  mmap_assert_locked(walk.mm);  in walk_page_range()
    400  vma = find_vma(walk.mm, start);  in walk_page_range()
    444  int walk_page_range_novma(struct mm_struct *mm, unsigned long start,  in walk_page_range_novma() argument
    451  .mm = mm,  in walk_page_range_novma()
    [all …]

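walk_page_range() above stores the target mm in the mm_walk cookie that every callback receives, and asserts that the caller already holds the mmap lock (line 398). A hedged usage sketch from the caller's side, assuming the mm_walk_ops callback signatures of this kernel generation; count_present() and count_present_ptes() are hypothetical helpers, not part of pagewalk.c:

    static int count_present(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
    {
        unsigned long *count = walk->private;   /* cookie passed to walk_page_range() */

        if (pte_present(*pte))
            (*count)++;
        return 0;                               /* non-zero would abort the walk */
    }

    static const struct mm_walk_ops count_ops = {
        .pte_entry = count_present,
    };

    static unsigned long count_present_ptes(struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
    {
        unsigned long count = 0;

        mmap_read_lock(mm);                     /* walk_page_range() asserts this lock */
        walk_page_range(mm, start, end, &count_ops, &count);
        mmap_read_unlock(mm);

        return count;
    }
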
process_vm_access.c
    75  struct mm_struct *mm,  in process_vm_rw_single_vec() argument
    105  mmap_read_lock(mm);  in process_vm_rw_single_vec()
    106  pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,  in process_vm_rw_single_vec()
    110  mmap_read_unlock(mm);  in process_vm_rw_single_vec()
    159  struct mm_struct *mm;  in process_vm_rw_core() local
    203  mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);  in process_vm_rw_core()
    204  if (!mm || IS_ERR(mm)) {  in process_vm_rw_core()
    205  rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;  in process_vm_rw_core()
    218  iter, process_pages, mm, task, vm_write);  in process_vm_rw_core()
    229  mmput(mm);  in process_vm_rw_core()

msync.c
    35  struct mm_struct *mm = current->mm;  in SYSCALL_DEFINE3() local
    60  mmap_read_lock(mm);  in SYSCALL_DEFINE3()
    61  vma = find_vma(mm, start);  in SYSCALL_DEFINE3()
    91  mmap_read_unlock(mm);  in SYSCALL_DEFINE3()
    96  mmap_read_lock(mm);  in SYSCALL_DEFINE3()
    97  vma = find_vma(mm, start);  in SYSCALL_DEFINE3()
    107  mmap_read_unlock(mm);  in SYSCALL_DEFINE3()

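The msync() hits show the usual bracket for inspecting a range of the caller's own address space: take the mmap lock for reading, look up the first VMA at or after the start address, and drop the lock when done (re-acquiring it after any step that had to sleep, as on lines 91 and 96). A minimal sketch of that bracket; range_is_mapped() is a hypothetical helper, not part of msync.c:

    static bool range_is_mapped(unsigned long start)
    {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        bool mapped;

        mmap_read_lock(mm);
        /* find_vma() returns the first VMA with vm_end > start, or NULL. */
        vma = find_vma(mm, start);
        mapped = vma && start >= vma->vm_start;
        mmap_read_unlock(mm);

        return mapped;
    }
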
huge_memory.c
    127  struct page *mm_get_huge_zero_page(struct mm_struct *mm)  in mm_get_huge_zero_page() argument
    129  if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_get_huge_zero_page()
    135  if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_get_huge_zero_page()
    141  void mm_put_huge_zero_page(struct mm_struct *mm)  in mm_put_huge_zero_page() argument
    143  if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))  in mm_put_huge_zero_page()
    543  ret = current->mm->get_unmapped_area(filp, addr, len_pad,  in __thp_get_unmapped_area()
    577  return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);  in thp_get_unmapped_area()
    695  static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,  in set_huge_zero_page() argument
    705  pgtable_trans_huge_deposit(mm, pmd, pgtable);  in set_huge_zero_page()
    706  set_pmd_at(mm, haddr, pmd, entry);  in set_huge_zero_page()
    [all …]

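mm_get_huge_zero_page() above uses a bit in mm->flags so that each mm takes at most one reference on the shared huge zero page. A reduced sketch of that once-per-mm idiom; get_shared_ref() and put_shared_ref() are hypothetical stand-ins for the real zero-page refcounting:

    static bool mm_take_shared_ref_once(struct mm_struct *mm)
    {
        /* Fast path: this mm already holds its reference. */
        if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
            return true;

        if (!get_shared_ref())          /* hypothetical: acquire the shared object */
            return false;

        /* Only the first setter keeps the reference; racing losers give it back. */
        if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
            put_shared_ref();           /* hypothetical */

        return true;
    }

mm_put_huge_zero_page() (line 141) drops that per-mm reference again when the mm is torn down.
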
pgalloc-track.h
    6  static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,  in p4d_alloc_track() argument
    11  if (__p4d_alloc(mm, pgd, address))  in p4d_alloc_track()
    19  static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,  in pud_alloc_track() argument
    24  if (__pud_alloc(mm, p4d, address))  in pud_alloc_track()
    32  static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,  in pmd_alloc_track() argument
    37  if (__pmd_alloc(mm, pud, address))  in pmd_alloc_track()

/mm/damon/
vaddr.c
    44  struct mm_struct *mm;  in damon_get_mm() local
    50  mm = get_task_mm(task);  in damon_get_mm()
    52  return mm;  in damon_get_mm()
    175  struct mm_struct *mm;  in damon_va_three_regions() local
    178  mm = damon_get_mm(t);  in damon_va_three_regions()
    179  if (!mm)  in damon_va_three_regions()
    182  mmap_read_lock(mm);  in damon_va_three_regions()
    183  rc = __damon_va_three_regions(mm->mmap, regions);  in damon_va_three_regions()
    184  mmap_read_unlock(mm);  in damon_va_three_regions()
    186  mmput(mm);  in damon_va_three_regions()
    [all …]

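damon_get_mm() and damon_va_three_regions() above pair get_task_mm() with mmput(): the target mm is pinned with a reference, read-locked only while its VMA list is inspected, then released. A sketch of that lifetime rule; monitor_address_space() is a hypothetical name and the region-building logic is elided:

    static int monitor_address_space(struct task_struct *task)
    {
        struct mm_struct *mm;

        mm = get_task_mm(task);         /* NULL for kernel threads or exited tasks */
        if (!mm)
            return -EINVAL;

        mmap_read_lock(mm);
        /* ... walk mm->mmap and build the monitored regions here ... */
        mmap_read_unlock(mm);

        mmput(mm);                      /* drop the reference from get_task_mm() */
        return 0;
    }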