
Lines Matching refs:mm (references to the mm identifier in fs/proc/task_mmu.c, Linux kernel)

24 void task_mem(struct seq_file *m, struct mm_struct *mm)  in task_mem()  argument
29 anon = get_mm_counter(mm, MM_ANONPAGES); in task_mem()
30 file = get_mm_counter(mm, MM_FILEPAGES); in task_mem()
31 shmem = get_mm_counter(mm, MM_SHMEMPAGES); in task_mem()
40 hiwater_vm = total_vm = mm->total_vm; in task_mem()
41 if (hiwater_vm < mm->hiwater_vm) in task_mem()
42 hiwater_vm = mm->hiwater_vm; in task_mem()
44 if (hiwater_rss < mm->hiwater_rss) in task_mem()
45 hiwater_rss = mm->hiwater_rss; in task_mem()
47 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; in task_mem()
48 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; in task_mem()
49 swap = get_mm_counter(mm, MM_SWAPENTS); in task_mem()
50 ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); in task_mem()
51 pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); in task_mem()
71 mm->locked_vm << (PAGE_SHIFT-10), in task_mem()
72 mm->pinned_vm << (PAGE_SHIFT-10), in task_mem()
78 mm->data_vm << (PAGE_SHIFT-10), in task_mem()
79 mm->stack_vm << (PAGE_SHIFT-10), text, lib, in task_mem()
83 hugetlb_report_usage(m, mm); in task_mem()
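
task_mem() above formats the VmPeak, VmSize, VmHWM, VmRSS, RssAnon, RssFile, RssShmem, VmData, VmStk, VmExe, VmLib, VmPTE, VmPMD and VmSwap fields of /proc/<pid>/status; the recurring `<< (PAGE_SHIFT-10)` converts page counts to kB. A minimal userspace sketch that reads those fields back, assuming only a standard Linux /proc:

/* Sketch: print the Vm*/Rss* lines that task_mem() emits into
 * /proc/self/status. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Vm", 2) || !strncmp(line, "Rss", 3))
			fputs(line, stdout);	/* e.g. "VmRSS:  1234 kB" */
	fclose(f);
	return 0;
}
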
86 unsigned long task_vsize(struct mm_struct *mm) in task_vsize() argument
88 return PAGE_SIZE * mm->total_vm; in task_vsize()
91 unsigned long task_statm(struct mm_struct *mm, in task_statm() argument
95 *shared = get_mm_counter(mm, MM_FILEPAGES) + in task_statm()
96 get_mm_counter(mm, MM_SHMEMPAGES); in task_statm()
97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) in task_statm()
99 *data = mm->data_vm + mm->stack_vm; in task_statm()
100 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); in task_statm()
101 return mm->total_vm; in task_statm()
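
task_statm() fills the seven page counts of /proc/<pid>/statm: size, resident, shared, text, lib, data and dt (lib and dt are unused and read as 0 on modern kernels; note on line 99 that "data" is data_vm plus stack_vm). A sketch that parses them and converts pages to kB:

/* Sketch: read /proc/self/statm, whose fields task_statm() computes.
 * Values are in pages; convert with the runtime page size. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	long kb_per_page = sysconf(_SC_PAGESIZE) / 1024;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f || fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
			 &size, &resident, &shared, &text,
			 &lib, &data, &dt) != 7) {
		perror("statm");
		return 1;
	}
	printf("VSZ %lu kB, RSS %lu kB, shared %lu kB\n",
	       size * kb_per_page, resident * kb_per_page,
	       shared * kb_per_page);
	fclose(f);
	return 0;
}
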
133 struct mm_struct *mm = vma->vm_mm; in seq_print_vma_name() local
154 pages_pinned = get_user_pages_remote(current, mm, in seq_print_vma_name()
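
seq_print_vma_name() is not in the mainline task_mmu.c of this era; it comes from the anonymous-VMA-naming patch carried in Android common kernels (a variant was mainlined in Linux 5.17), and it uses get_user_pages_remote() to copy the user-supplied name string so maps can show it as "[anon:name]". A hedged sketch of the userspace side, assuming the prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...) interface; kernels without the feature return EINVAL:

/* Sketch: name an anonymous mapping so it shows as "[anon:demo]" in
 * /proc/self/maps. Requires a kernel with anon VMA names (Android
 * common kernels, or mainline >= 5.17). */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, "demo"))
		perror("prctl");	/* EINVAL: kernel lacks the feature */
	return 0;
}
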
182 struct mm_struct *mm = priv->mm; in vma_stop() local
185 up_read(&mm->mmap_sem); in vma_stop()
186 mmput(mm); in vma_stop()
207 struct mm_struct *mm; in m_start() local
219 mm = priv->mm; in m_start()
220 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) in m_start()
223 down_read(&mm->mmap_sem); in m_start()
225 priv->tail_vma = get_gate_vma(mm); in m_start()
228 vma = find_vma(mm, last_addr - 1); in m_start()
236 if (pos < mm->map_count) { in m_start()
237 for (vma = mm->mmap; pos; pos--) { in m_start()
245 if (pos == mm->map_count && priv->tail_vma) in m_start()
285 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); in proc_maps_open()
286 if (IS_ERR(priv->mm)) { in proc_maps_open()
287 int err = PTR_ERR(priv->mm); in proc_maps_open()
301 if (priv->mm) in proc_map_release()
302 mmdrop(priv->mm); in proc_map_release()
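
Note the two reference counts here: proc_maps_open() keeps the mm_struct itself alive via a count that proc_map_release() drops with mmdrop(), while each m_start() pins the address space with atomic_inc_not_zero(&mm->mm_users) (line 220), released by mmput() in vma_stop(), so reading the maps of an exited task simply yields an empty file. proc_mem_open(inode, PTRACE_MODE_READ) also applies ptrace access checks at open time; a sketch probing that check against a process we normally may not inspect (PID 1 is just an assumption, any other user's PID works):

/* Sketch: opening another user's maps goes through proc_mem_open()'s
 * PTRACE_MODE_READ check; expect EACCES/EPERM without privilege. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/1/maps", "r");	/* init, usually not ours */

	if (!f)
		printf("open failed: %s\n", strerror(errno));
	else
		fclose(f);
	return 0;
}
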
333 struct mm_struct *mm = vma->vm_mm; in show_map_vma() local
383 if (!mm) { in show_map_vma()
388 if (vma->vm_start <= mm->brk && in show_map_vma()
389 vma->vm_end >= mm->start_brk) { in show_map_vma()
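
The comparison on lines 388-389 (a VMA overlapping [mm->start_brk, mm->brk]) is what show_map_vma() labels "[heap]" in /proc/<pid>/maps. A sketch that locates that line in the current process:

/* Sketch: find the "[heap]" line that show_map_vma() labels via the
 * mm->brk / mm->start_brk overlap test. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	void *p = malloc(1);	/* make sure a heap exists */
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	(void)p;
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[heap]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
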
781 .mm = vma->vm_mm, in show_smap()
1065 struct mm_struct *mm; in clear_refs_write() local
1086 mm = get_task_mm(task); in clear_refs_write()
1087 if (mm) { in clear_refs_write()
1094 .mm = mm, in clear_refs_write()
1099 if (down_write_killable(&mm->mmap_sem)) { in clear_refs_write()
1108 reset_mm_hiwater_rss(mm); in clear_refs_write()
1109 up_write(&mm->mmap_sem); in clear_refs_write()
1113 down_read(&mm->mmap_sem); in clear_refs_write()
1115 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1118 up_read(&mm->mmap_sem); in clear_refs_write()
1119 if (down_write_killable(&mm->mmap_sem)) { in clear_refs_write()
1123 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1127 downgrade_write(&mm->mmap_sem); in clear_refs_write()
1130 mmu_notifier_invalidate_range_start(mm, 0, -1); in clear_refs_write()
1132 walk_page_range(0, mm->highest_vm_end, &clear_refs_walk); in clear_refs_write()
1134 mmu_notifier_invalidate_range_end(mm, 0, -1); in clear_refs_write()
1135 flush_tlb_mm(mm); in clear_refs_write()
1136 up_read(&mm->mmap_sem); in clear_refs_write()
1138 mmput(mm); in clear_refs_write()
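
clear_refs_write() handles writes to /proc/<pid>/clear_refs. The documented command values are 1 (clear the referenced/young bits on all pages), 2 (anonymous pages only), 3 (file-backed pages only), 4 (clear soft-dirty bits, used with pagemap to track writes) and 5 (reset the RSS high-water mark, the reset_mm_hiwater_rss() call on line 1108). A sketch using value 4, which only has an effect on CONFIG_MEM_SOFT_DIRTY kernels:

/* Sketch: write "4" to /proc/self/clear_refs to clear soft-dirty bits,
 * the CLEAR_REFS_SOFT_DIRTY case of clear_refs_write(). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/clear_refs", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("4", f);
	fclose(f);	/* pages written after this read back as soft-dirty */
	return 0;
}
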
1196 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole()
1320 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); in pagemap_pmd_range()
1412 struct mm_struct *mm = file->private_data; in pagemap_read() local
1421 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) in pagemap_read()
1447 pagemap_walk.mm = mm; in pagemap_read()
1453 end_vaddr = mm->task_size; in pagemap_read()
1456 if (svpfn > mm->task_size >> PAGE_SHIFT) in pagemap_read()
1475 down_read(&mm->mmap_sem); in pagemap_read()
1477 up_read(&mm->mmap_sem); in pagemap_read()
1496 mmput(mm); in pagemap_read()
1503 struct mm_struct *mm; in pagemap_open() local
1505 mm = proc_mem_open(inode, PTRACE_MODE_READ); in pagemap_open()
1506 if (IS_ERR(mm)) in pagemap_open()
1507 return PTR_ERR(mm); in pagemap_open()
1508 file->private_data = mm; in pagemap_open()
1514 struct mm_struct *mm = file->private_data; in pagemap_release() local
1516 if (mm) in pagemap_release()
1517 mmdrop(mm); in pagemap_release()
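
pagemap_read() serves /proc/<pid>/pagemap as one little-endian 64-bit entry per virtual page, so the entry for address A sits at file offset (A / pagesize) * 8. The documented bits are: 63 page present, 62 swapped, 61 file-page or shared-anon, 55 soft-dirty, and 0-54 the PFN (zeroed for readers without CAP_SYS_ADMIN on recent kernels). A sketch translating one address:

/* Sketch: look up the pagemap entry for one virtual address, as served
 * by pagemap_read(). PFN bits read as 0 without CAP_SYS_ADMIN. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	static char page[1] = { 1 };		/* something mapped */
	long psize = sysconf(_SC_PAGESIZE);
	uint64_t ent;
	off_t off = ((uintptr_t)page / psize) * sizeof(ent);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0 || pread(fd, &ent, sizeof(ent), off) != sizeof(ent))
		return 1;
	printf("present=%d swapped=%d soft-dirty=%d pfn=0x%llx\n",
	       (int)(ent >> 63 & 1), (int)(ent >> 62 & 1),
	       (int)(ent >> 55 & 1),
	       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}
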
1648 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
1697 struct mm_struct *mm = vma->vm_mm; in show_numa_map() local
1702 .mm = mm, in show_numa_map()
1708 if (!mm) in show_numa_map()
1727 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
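
show_numa_map() produces /proc/<pid>/numa_maps (present only on CONFIG_NUMA kernels), with gather_pte_stats() walking the page tables to count pages per NUMA node; note on line 1727 that it reuses the same start_brk/brk test as show_map_vma() to tag the heap. A sketch that simply dumps the file:

/* Sketch: dump /proc/self/numa_maps, which show_numa_map() produces.
 * The file exists only on CONFIG_NUMA kernels. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/numa_maps", "r");
	char line[512];

	if (!f) {
		perror("numa_maps");	/* likely a non-NUMA kernel */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
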