/fs/proc/ |
D | task_nommu.c |
    21  void task_mem(struct seq_file *m, struct mm_struct *mm)  in task_mem() argument
    28  down_read(&mm->mmap_sem);  in task_mem()
    29  for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {  in task_mem()
    42  if (atomic_read(&mm->mm_count) > 1 ||  in task_mem()
    52  if (atomic_read(&mm->mm_count) > 1)  in task_mem()
    53  sbytes += kobjsize(mm);  in task_mem()
    55  bytes += kobjsize(mm);  in task_mem()
    80  up_read(&mm->mmap_sem);  in task_mem()
    83  unsigned long task_vsize(struct mm_struct *mm)  in task_vsize() argument
    89  down_read(&mm->mmap_sem);  in task_vsize()
    [all …]
|
D | task_mmu.c |
    30  void task_mem(struct seq_file *m, struct mm_struct *mm)  in task_mem() argument
    35  anon = get_mm_counter(mm, MM_ANONPAGES);  in task_mem()
    36  file = get_mm_counter(mm, MM_FILEPAGES);  in task_mem()
    37  shmem = get_mm_counter(mm, MM_SHMEMPAGES);  in task_mem()
    46  hiwater_vm = total_vm = mm->total_vm;  in task_mem()
    47  if (hiwater_vm < mm->hiwater_vm)  in task_mem()
    48  hiwater_vm = mm->hiwater_vm;  in task_mem()
    50  if (hiwater_rss < mm->hiwater_rss)  in task_mem()
    51  hiwater_rss = mm->hiwater_rss;  in task_mem()
    54  text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);  in task_mem()
    [all …]
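The task_mmu.c hits above read the per-mm RSS counters locklessly; get_mm_counter() is an atomic read, so no mmap_sem is needed for a statistics snapshot. A minimal sketch of the same pattern, with the hypothetical helper name current_rss_pages():

    #include <linux/mm.h>
    #include <linux/mm_types.h>

    /* Hypothetical helper mirroring the counter reads in task_mem():
     * the per-mm RSS counters can be sampled without mmap_sem. */
    static unsigned long current_rss_pages(struct mm_struct *mm)
    {
            unsigned long anon  = get_mm_counter(mm, MM_ANONPAGES);
            unsigned long file  = get_mm_counter(mm, MM_FILEPAGES);
            unsigned long shmem = get_mm_counter(mm, MM_SHMEMPAGES);

            /* task_mem() additionally clamps against mm->hiwater_rss
             * (lines 50-51 above) so VmHWM reports the peak value. */
            return anon + file + shmem;
    }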
|
D | array.c |
    389  static inline void task_core_dumping(struct seq_file *m, struct mm_struct *mm)  in task_core_dumping() argument
    391  seq_put_decimal_ull(m, "CoreDumping:\t", !!mm->core_state);  in task_core_dumping()
    395  static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm)  in task_thp_status() argument
    400  thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags);  in task_thp_status()
    407  struct mm_struct *mm = get_task_mm(task);  in proc_pid_status() local
    415  if (mm) {  in proc_pid_status()
    416  task_mem(m, mm);  in proc_pid_status()
    417  task_core_dumping(m, mm);  in proc_pid_status()
    418  task_thp_status(m, mm);  in proc_pid_status()
    419  mmput(mm);  in proc_pid_status()
    [all …]
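proc_pid_status() above shows the canonical reference discipline for another task's mm: get_task_mm() bumps mm_users (and returns NULL for kernel threads), and every success must be paired with mmput(). A hedged sketch; show_vm_size() is an invented name:

    #include <linux/sched.h>
    #include <linux/sched/mm.h>     /* get_task_mm(), mmput() */
    #include <linux/seq_file.h>

    static void show_vm_size(struct seq_file *m, struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);  /* takes mm_users ref */

            if (!mm)
                    return;         /* kernel thread: no address space */

            seq_printf(m, "VmSize:\t%8lu kB\n",
                       mm->total_vm << (PAGE_SHIFT - 10));
            mmput(mm);              /* drops the reference taken above */
    }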
|
D | base.c |
    217  static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,  in get_mm_proctitle() argument
    232  got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);  in get_mm_proctitle()
    254  static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,  in get_mm_cmdline() argument
    262  if (!mm->env_end)  in get_mm_cmdline()
    265  spin_lock(&mm->arg_lock);  in get_mm_cmdline()
    266  arg_start = mm->arg_start;  in get_mm_cmdline()
    267  arg_end = mm->arg_end;  in get_mm_cmdline()
    268  env_start = mm->env_start;  in get_mm_cmdline()
    269  env_end = mm->env_end;  in get_mm_cmdline()
    270  spin_unlock(&mm->arg_lock);  in get_mm_cmdline()
    [all …]
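get_mm_cmdline() snapshots the argument range under mm->arg_lock because prctl(PR_SET_MM) can change those fields concurrently. A sketch of that pattern (snapshot_args() is hypothetical):

    #include <linux/mm_types.h>
    #include <linux/spinlock.h>

    /* Copy a consistent view of the argv range before using it;
     * mirrors lines 265-270 above. */
    static void snapshot_args(struct mm_struct *mm,
                              unsigned long *start, unsigned long *end)
    {
            spin_lock(&mm->arg_lock);
            *start = mm->arg_start;
            *end   = mm->arg_end;
            spin_unlock(&mm->arg_lock);
    }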
|
D | internal.h |
    288  struct mm_struct *mm;  member
|
D | inode.c |
    320  get_area = current->mm->get_unmapped_area;  in proc_reg_get_unmapped_area()
|
/fs/ |
D | userfaultfd.c |
     78  struct mm_struct *mm;  member
    175  mmdrop(ctx->mm);  in userfaultfd_ctx_put()
    233  struct mm_struct *mm = ctx->mm;  in userfaultfd_huge_must_wait() local
    237  VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));  in userfaultfd_huge_must_wait()
    239  ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));  in userfaultfd_huge_must_wait()
    281  struct mm_struct *mm = ctx->mm;  in userfaultfd_must_wait() local
    289  VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));  in userfaultfd_must_wait()
    291  pgd = pgd_offset(mm, address);  in userfaultfd_must_wait()
    354  struct mm_struct *mm = vmf->vma->vm_mm;  in handle_userfault() local
    379  WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));  in handle_userfault()
    [all …]
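userfaultfd pins the mm_struct itself for the lifetime of its context: an mmgrab() at setup balanced by the mmdrop() at line 175. A sketch of that pairing, using an illustrative struct my_ctx; note mmgrab() holds mm_count, keeping the struct allocated, but does not keep the address space alive the way mm_users would:

    #include <linux/sched/mm.h>     /* mmgrab(), mmdrop() */
    #include <linux/slab.h>

    struct my_ctx {                 /* stand-in for userfaultfd_ctx */
            struct mm_struct *mm;
    };

    static struct my_ctx *my_ctx_alloc(struct mm_struct *mm)
    {
            struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (!ctx)
                    return NULL;
            mmgrab(mm);             /* pin the mm_struct allocation */
            ctx->mm = mm;
            return ctx;
    }

    static void my_ctx_put(struct my_ctx *ctx)
    {
            mmdrop(ctx->mm);        /* mirrors line 175 above */
            kfree(ctx);
    }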
|
D | binfmt_elf_fdpic.c |
    359  current->mm->start_code = 0;  in load_elf_fdpic_binary()
    360  current->mm->end_code = 0;  in load_elf_fdpic_binary()
    361  current->mm->start_stack = 0;  in load_elf_fdpic_binary()
    362  current->mm->start_data = 0;  in load_elf_fdpic_binary()
    363  current->mm->end_data = 0;  in load_elf_fdpic_binary()
    364  current->mm->context.exec_fdpic_loadmap = 0;  in load_elf_fdpic_binary()
    365  current->mm->context.interp_fdpic_loadmap = 0;  in load_elf_fdpic_binary()
    370  &current->mm->start_stack,  in load_elf_fdpic_binary()
    371  &current->mm->start_brk);  in load_elf_fdpic_binary()
    373  retval = setup_arg_pages(bprm, current->mm->start_stack,  in load_elf_fdpic_binary()
    [all …]
|
D | exec.c |
    185  struct mm_struct *mm = current->mm;  in acct_arg_size() local
    188  if (!mm || !diff)  in acct_arg_size()
    192  add_mm_counter(mm, MM_ANONPAGES, diff);  in acct_arg_size()
    217  ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,  in get_arg_page()
    247  struct mm_struct *mm = bprm->mm;  in __bprm_mm_init() local
    249  bprm->vma = vma = vm_area_alloc(mm);  in __bprm_mm_init()
    254  if (down_write_killable(&mm->mmap_sem)) {  in __bprm_mm_init()
    271  err = insert_vm_struct(mm, vma);  in __bprm_mm_init()
    275  mm->stack_vm = mm->total_vm = 1;  in __bprm_mm_init()
    276  arch_bprm_mm_init(mm, vma);  in __bprm_mm_init()
    [all …]
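__bprm_mm_init() takes mmap_sem with down_write_killable(), so a fatal signal aborts the exec instead of blocking uninterruptibly. A minimal sketch of the idiom (mm_mutate() is an invented name):

    #include <linux/errno.h>
    #include <linux/mm_types.h>
    #include <linux/rwsem.h>

    static int mm_mutate(struct mm_struct *mm)
    {
            if (down_write_killable(&mm->mmap_sem))
                    return -EINTR;  /* fatal signal while waiting */

            /* ... modify the VMA tree, e.g. insert_vm_struct() ... */

            up_write(&mm->mmap_sem);
            return 0;
    }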
|
D | binfmt_flat.c |
    130  sp = (unsigned long __user *)current->mm->start_stack;  in create_flat_tables()
    138  current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;  in create_flat_tables()
    139  sp = (unsigned long __user *)current->mm->start_stack;  in create_flat_tables()
    150  current->mm->arg_start = (unsigned long)p;  in create_flat_tables()
    159  current->mm->arg_end = (unsigned long)p;  in create_flat_tables()
    161  current->mm->env_start = (unsigned long) p;  in create_flat_tables()
    170  current->mm->env_end = (unsigned long)p;  in create_flat_tables()
    393  ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);  in old_reloc()
    395  ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);  in old_reloc()
    405  val += current->mm->start_code;  in old_reloc()
    [all …]
|
D | binfmt_aout.c |
     90  current->mm->arg_start = (unsigned long) p;  in create_aout_tables()
     99  current->mm->arg_end = current->mm->env_start = (unsigned long) p;  in create_aout_tables()
    108  current->mm->env_end = (unsigned long) p;  in create_aout_tables()
    166  current->mm->end_code = ex.a_text +  in load_aout_binary()
    167  (current->mm->start_code = N_TXTADDR(ex));  in load_aout_binary()
    168  current->mm->end_data = ex.a_data +  in load_aout_binary()
    169  (current->mm->start_data = N_DATADDR(ex));  in load_aout_binary()
    170  current->mm->brk = ex.a_bss +  in load_aout_binary()
    171  (current->mm->start_brk = N_BSSADDR(ex));  in load_aout_binary()
    242  retval = set_brk(current->mm->start_brk, current->mm->brk);  in load_aout_binary()
    [all …]
|
D | coredump.c |
    162  exe_file = get_mm_exe_file(current->mm);  in cn_print_exe_file()
    350  if (t != current && t->mm) {  in zap_process()
    360  static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,  in zap_threads() argument
    369  mm->core_state = core_state;  in zap_threads()
    379  if (atomic_read(&mm->mm_users) == nr + 1)  in zap_threads()
    419  if (unlikely(!p->mm))  in zap_threads()
    421  if (unlikely(p->mm == mm)) {  in zap_threads()
    439  struct mm_struct *mm = tsk->mm;  in coredump_wait() local
    446  if (down_write_killable(&mm->mmap_sem))  in coredump_wait()
    449  if (!mm->core_state)  in coredump_wait()
    [all …]
|
D | binfmt_elf.c |
     117  current->mm->start_brk = current->mm->brk = end;  in set_brk()
     229  elf_info = (elf_addr_t *)current->mm->saved_auxv;  in create_elf_tables()
     279  sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);  in create_elf_tables()
     302  vma = find_extend_vma(current->mm, bprm->p);  in create_elf_tables()
     311  p = current->mm->arg_end = current->mm->arg_start;  in create_elf_tables()
     323  current->mm->arg_end = p;  in create_elf_tables()
     326  current->mm->env_end = current->mm->env_start = p;  in create_elf_tables()
     338  current->mm->env_end = p;  in create_elf_tables()
    1103  current->mm->end_code = end_code;  in load_elf_binary()
    1104  current->mm->start_code = start_code;  in load_elf_binary()
    [all …]
|
D | aio.c |
    330  struct mm_struct *mm = vma->vm_mm;  in aio_ring_mremap() local
    334  spin_lock(&mm->ioctx_lock);  in aio_ring_mremap()
    336  table = rcu_dereference(mm->ioctx_table);  in aio_ring_mremap()
    351  spin_unlock(&mm->ioctx_lock);  in aio_ring_mremap()
    463  struct mm_struct *mm = current->mm;  in aio_setup_ring() local
    522  if (down_write_killable(&mm->mmap_sem)) {  in aio_setup_ring()
    531  up_write(&mm->mmap_sem);  in aio_setup_ring()
    633  static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)  in ioctx_add_table() argument
    639  spin_lock(&mm->ioctx_lock);  in ioctx_add_table()
    640  table = rcu_dereference_raw(mm->ioctx_table);  in ioctx_add_table()
    [all …]
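aio keeps a per-mm table of contexts: readers access mm->ioctx_table under RCU while writers serialize on mm->ioctx_lock. A reader sketch under that assumption; note struct kioctx_table is file-local to fs/aio.c, so this only compiles in that context, and ioctx_slot_in_use() is an invented name:

    #include <linux/mm_types.h>
    #include <linux/rcupdate.h>

    static bool ioctx_slot_in_use(struct mm_struct *mm, unsigned int i)
    {
            struct kioctx_table *table;
            bool in_use = false;

            rcu_read_lock();
            table = rcu_dereference(mm->ioctx_table);
            if (table && i < table->nr)
                    in_use = rcu_dereference(table->table[i]) != NULL;
            rcu_read_unlock();

            return in_use;
    }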
|
D | Kconfig |
    210  <file:Documentation/admin-guide/mm/hugetlbpage.rst> for details.
|
D | io_uring.c |
    3195  mmgrab(current->mm);  in io_sq_offload_start()
    3196  ctx->sqo_mm = current->mm;  in io_sq_offload_start()
    3481  down_read(&current->mm->mmap_sem);  in io_sqe_buffer_register()
    3499  up_read(&current->mm->mmap_sem);  in io_sqe_buffer_register()
|
/fs/hugetlbfs/ |
D | inode.c |
    202  struct mm_struct *mm = current->mm;  in hugetlb_get_unmapped_area() local
    220  vma = find_vma(mm, addr);  in hugetlb_get_unmapped_area()
    427  vma_init(&pseudo_vma, current->mm);  in remove_inode_hugepages()
    578  struct mm_struct *mm = current->mm;  in hugetlbfs_fallocate() local
    616  vma_init(&pseudo_vma, mm);  in hugetlbfs_fallocate()
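remove_inode_hugepages() and hugetlbfs_fallocate() build a pseudo-VMA on the stack with vma_init() so hugetlb allocation helpers that expect a vm_area_struct can run without touching the real VMA tree. A sketch of the trick (with_pseudo_vma() is hypothetical):

    #include <linux/mm.h>

    static void with_pseudo_vma(struct mm_struct *mm, vm_flags_t flags)
    {
            struct vm_area_struct pseudo_vma;

            vma_init(&pseudo_vma, mm);      /* zeroes it, sets vm_mm */
            pseudo_vma.vm_flags = flags;    /* e.g. VM_HUGETLB | VM_SHARED */

            /* ... pass &pseudo_vma to allocation helpers; it is never
             * linked into the VMA tree or visible to rmap ... */
    }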
|
/fs/ramfs/ |
D | file-mmu.c |
    38  return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);  in ramfs_mmu_get_unmapped_area()
|
/fs/notify/inotify/ |
D | inotify_user.c |
    645  group->memcg = get_mem_cgroup_from_mm(current->mm);  in inotify_new_group()
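Both notification backends (the identical fanotify call appears below) record the memcg of the group's creator so that later event allocations are charged to it rather than to whichever task triggers an event. get_mem_cgroup_from_mm() takes a reference the group must release on teardown. A sketch; group_capture_memcg() is an invented name:

    #include <linux/fsnotify_backend.h>
    #include <linux/memcontrol.h>
    #include <linux/sched.h>

    static void group_capture_memcg(struct fsnotify_group *group)
    {
            /* takes a memcg reference; paired with a put when the
             * group is destroyed */
            group->memcg = get_mem_cgroup_from_mm(current->mm);
    }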
|
/fs/notify/fanotify/ |
D | fanotify_user.c |
    824  group->memcg = get_mem_cgroup_from_mm(current->mm);  in SYSCALL_DEFINE2()
|