Lines matching refs: mm
551 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) in vma_init() argument
556 vma->vm_mm = mm; in vma_init()
584 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) } argument
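The TLB_FLUSH_VMA() initializer above builds a throwaway vm_area_struct by hand; vma_init() is the general-purpose way to set up such a temporary (for example on-stack) VMA tied to an mm. A minimal sketch, assuming kernel context; init_scratch_vma() is a hypothetical helper, not part of mm.h:

    #include <linux/mm.h>

    static void init_scratch_vma(struct vm_area_struct *vma, struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
    {
            vma_init(vma, mm);      /* zeroes the vma, sets vm_mm and a dummy vm_ops,
                                     * and initialises the anon_vma chain */
            vma->vm_start = start;
            vma->vm_end = end;
    }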
1505 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
1508 int follow_pte(struct mm_struct *mm, unsigned long address,
1528 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1545 struct mm_struct *mm, unsigned long address, in fixup_user_fault() argument
1567 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1569 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1572 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1587 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
1588 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
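fixup_user_fault(), access_remote_vm() and get_user_pages_remote() all operate on another task's mm. A minimal sketch of reading remote memory through access_remote_vm(), assuming kernel context; peek_task_memory() is a hypothetical helper, and get_task_mm()/mmput() come from <linux/sched/mm.h>:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>
    #include <linux/errno.h>

    static int peek_task_memory(struct task_struct *tsk, unsigned long addr,
                                void *buf, int len)
    {
            struct mm_struct *mm = get_task_mm(tsk);   /* takes a reference on the mm */
            int copied;

            if (!mm)
                    return -ESRCH;     /* kernel thread or exiting task: no mm */

            copied = access_remote_vm(mm, addr, buf, len, FOLL_FORCE);
            mmput(mm);                 /* drop the reference from get_task_mm() */
            return copied;             /* number of bytes actually copied */
    }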
1683 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member) in get_mm_counter() argument
1685 long val = atomic_long_read(&mm->rss_stat.count[member]); in get_mm_counter()
1698 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
1701 static inline void add_mm_counter(struct mm_struct *mm, int member, long value) in add_mm_counter() argument
1703 long count = atomic_long_add_return(value, &mm->rss_stat.count[member]); in add_mm_counter()
1705 mm_trace_rss_stat(mm, member, count, value); in add_mm_counter()
1708 static inline void inc_mm_counter(struct mm_struct *mm, int member) in inc_mm_counter() argument
1710 long count = atomic_long_inc_return(&mm->rss_stat.count[member]); in inc_mm_counter()
1712 mm_trace_rss_stat(mm, member, count, 1); in inc_mm_counter()
1715 static inline void dec_mm_counter(struct mm_struct *mm, int member) in dec_mm_counter() argument
1717 long count = atomic_long_dec_return(&mm->rss_stat.count[member]); in dec_mm_counter()
1719 mm_trace_rss_stat(mm, member, count, -1); in dec_mm_counter()
1737 static inline unsigned long get_mm_rss(struct mm_struct *mm) in get_mm_rss() argument
1739 return get_mm_counter(mm, MM_FILEPAGES) + in get_mm_rss()
1740 get_mm_counter(mm, MM_ANONPAGES) + in get_mm_rss()
1741 get_mm_counter(mm, MM_SHMEMPAGES); in get_mm_rss()
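The rss_stat counters are plain atomic_long_t values indexed by MM_FILEPAGES, MM_ANONPAGES, MM_SHMEMPAGES and MM_SWAPENTS, and get_mm_rss() above sums the first three. A minimal sketch of reading them, assuming kernel context; both helpers are hypothetical:

    #include <linux/mm.h>

    static unsigned long anon_rss_bytes(struct mm_struct *mm)
    {
            /* counters are kept in pages; convert to bytes */
            return get_mm_counter(mm, MM_ANONPAGES) << PAGE_SHIFT;
    }

    static unsigned long total_rss_bytes(struct mm_struct *mm)
    {
            /* file + anon + shmem pages, as summed by get_mm_rss() */
            return get_mm_rss(mm) << PAGE_SHIFT;
    }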
1744 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) in get_mm_hiwater_rss() argument
1746 return max(mm->hiwater_rss, get_mm_rss(mm)); in get_mm_hiwater_rss()
1749 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) in get_mm_hiwater_vm() argument
1751 return max(mm->hiwater_vm, mm->total_vm); in get_mm_hiwater_vm()
1754 static inline void update_hiwater_rss(struct mm_struct *mm) in update_hiwater_rss() argument
1756 unsigned long _rss = get_mm_rss(mm); in update_hiwater_rss()
1758 if ((mm)->hiwater_rss < _rss) in update_hiwater_rss()
1759 (mm)->hiwater_rss = _rss; in update_hiwater_rss()
1762 static inline void update_hiwater_vm(struct mm_struct *mm) in update_hiwater_vm() argument
1764 if (mm->hiwater_vm < mm->total_vm) in update_hiwater_vm()
1765 mm->hiwater_vm = mm->total_vm; in update_hiwater_vm()
1768 static inline void reset_mm_hiwater_rss(struct mm_struct *mm) in reset_mm_hiwater_rss() argument
1770 mm->hiwater_rss = get_mm_rss(mm); in reset_mm_hiwater_rss()
1774 struct mm_struct *mm) in setmax_mm_hiwater_rss() argument
1776 unsigned long hiwater_rss = get_mm_hiwater_rss(mm); in setmax_mm_hiwater_rss()
1783 void sync_mm_rss(struct mm_struct *mm);
1785 static inline void sync_mm_rss(struct mm_struct *mm) in sync_mm_rss() argument
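update_hiwater_rss()/update_hiwater_vm() fold the current usage into the stored high-water marks, and the get_mm_hiwater_*() readers return the maximum of the stored mark and the live value. A minimal sketch of snapshotting the peaks before tearing mappings down, assuming kernel context; snapshot_peaks() is a hypothetical helper:

    #include <linux/mm.h>

    static void snapshot_peaks(struct mm_struct *mm,
                               unsigned long *peak_rss_pages,
                               unsigned long *peak_vm_pages)
    {
            update_hiwater_rss(mm);    /* fold current RSS into hiwater_rss */
            update_hiwater_vm(mm);     /* fold total_vm into hiwater_vm */

            *peak_rss_pages = get_mm_hiwater_rss(mm);
            *peak_vm_pages = get_mm_hiwater_vm(mm);
    }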
1799 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1801 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, in get_locked_pte() argument
1805 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); in get_locked_pte()
1810 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, in __p4d_alloc() argument
1816 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1820 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, in __pud_alloc() argument
1825 static inline void mm_inc_nr_puds(struct mm_struct *mm) {} in mm_inc_nr_puds() argument
1826 static inline void mm_dec_nr_puds(struct mm_struct *mm) {} in mm_dec_nr_puds() argument
1829 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1831 static inline void mm_inc_nr_puds(struct mm_struct *mm) in mm_inc_nr_puds() argument
1833 if (mm_pud_folded(mm)) in mm_inc_nr_puds()
1835 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); in mm_inc_nr_puds()
1838 static inline void mm_dec_nr_puds(struct mm_struct *mm) in mm_dec_nr_puds() argument
1840 if (mm_pud_folded(mm)) in mm_dec_nr_puds()
1842 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); in mm_dec_nr_puds()
1847 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, in __pmd_alloc() argument
1853 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} in mm_inc_nr_pmds() argument
1854 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} in mm_dec_nr_pmds() argument
1857 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1859 static inline void mm_inc_nr_pmds(struct mm_struct *mm) in mm_inc_nr_pmds() argument
1861 if (mm_pmd_folded(mm)) in mm_inc_nr_pmds()
1863 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); in mm_inc_nr_pmds()
1866 static inline void mm_dec_nr_pmds(struct mm_struct *mm) in mm_dec_nr_pmds() argument
1868 if (mm_pmd_folded(mm)) in mm_dec_nr_pmds()
1870 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); in mm_dec_nr_pmds()
1875 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) in mm_pgtables_bytes_init() argument
1877 atomic_long_set(&mm->pgtables_bytes, 0); in mm_pgtables_bytes_init()
1880 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) in mm_pgtables_bytes() argument
1882 return atomic_long_read(&mm->pgtables_bytes); in mm_pgtables_bytes()
1885 static inline void mm_inc_nr_ptes(struct mm_struct *mm) in mm_inc_nr_ptes() argument
1887 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); in mm_inc_nr_ptes()
1890 static inline void mm_dec_nr_ptes(struct mm_struct *mm) in mm_dec_nr_ptes() argument
1892 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes); in mm_dec_nr_ptes()
1896 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {} in mm_pgtables_bytes_init() argument
1897 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm) in mm_pgtables_bytes() argument
1902 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {} in mm_inc_nr_ptes() argument
1903 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} in mm_dec_nr_ptes() argument
1906 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
1916 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, in p4d_alloc() argument
1919 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ? in p4d_alloc()
1923 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, in pud_alloc() argument
1926 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? in pud_alloc()
1931 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in pmd_alloc() argument
1933 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? in pmd_alloc()
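p4d_alloc(), pud_alloc() and pmd_alloc() each populate a missing level (honouring folded levels and the pgtables_bytes accounting above) and return NULL only on allocation failure. A minimal sketch of walking down to the PMD for an address, assuming the caller holds the mm's mmap lock; walk_alloc_pmd() is a hypothetical name:

    #include <linux/mm.h>

    static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);   /* top level always exists */
            p4d_t *p4d;
            pud_t *pud;

            p4d = p4d_alloc(mm, pgd, addr);      /* allocate if absent, no-op if folded */
            if (!p4d)
                    return NULL;
            pud = pud_alloc(mm, p4d, addr);
            if (!pud)
                    return NULL;
            return pmd_alloc(mm, pud, addr);     /* NULL only on allocation failure */
    }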
1968 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) in pte_lockptr() argument
1993 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd) in pte_lockptr() argument
1995 return &mm->page_table_lock; in pte_lockptr()
2024 #define pte_offset_map_lock(mm, pmd, address, ptlp) \ argument
2026 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
2038 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) argument
2040 #define pte_alloc_map(mm, pmd, address) \ argument
2041 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
2043 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ argument
2044 (pte_alloc(mm, pmd) ? \
2045 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
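pte_offset_map_lock() maps the PTE for an address and returns with the lock from pte_lockptr() held (the per-page-table split lock when enabled, otherwise mm->page_table_lock); pte_unmap_unlock() undoes both. A minimal sketch that inspects one entry, assuming the PMD is already populated and does not hold a huge entry; pte_is_present() is a hypothetical helper:

    #include <linux/mm.h>

    static bool pte_is_present(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte;
            bool present;

            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return false;              /* no PTE table to look at */

            pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            present = pte_present(*pte);       /* read the entry under the split lock */
            pte_unmap_unlock(pte, ptl);
            return present;
    }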
2059 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) in pmd_lockptr() argument
2080 #define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte) argument
2084 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) in pmd_lockptr() argument
2086 return &mm->page_table_lock; in pmd_lockptr()
2092 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) argument
2096 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) in pmd_lock() argument
2098 spinlock_t *ptl = pmd_lockptr(mm, pmd); in pmd_lock()
2109 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud) in pud_lockptr() argument
2111 return &mm->page_table_lock; in pud_lockptr()
2114 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) in pud_lock() argument
2116 spinlock_t *ptl = pud_lockptr(mm, pud); in pud_lock()
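pmd_lock() and pud_lock() return the spinlock guarding that page table level already taken, so the caller only has to unlock. A minimal sketch that checks for a transparent-huge PMD under its lock, assuming kernel context; pmd_is_huge_entry() is hypothetical, and pmd_trans_huge() falls back to 0 on !THP configurations:

    #include <linux/mm.h>

    static bool pmd_is_huge_entry(struct mm_struct *mm, pmd_t *pmd)
    {
            spinlock_t *ptl = pmd_lock(mm, pmd);   /* returns with the lock held */
            bool huge = pmd_trans_huge(*pmd);

            spin_unlock(ptl);
            return huge;
    }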
2325 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2366 extern int mm_take_all_locks(struct mm_struct *mm);
2367 extern void mm_drop_all_locks(struct mm_struct *mm);
2369 extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2370 extern struct file *get_mm_exe_file(struct mm_struct *mm);
2378 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2383 extern int install_special_mapping(struct mm_struct *mm,
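get_mm_exe_file() returns a referenced struct file for the mm's main executable (or NULL), so callers must drop that reference with fput() when done. A minimal sketch, assuming kernel context; mm_has_exe_file() is a hypothetical helper:

    #include <linux/mm.h>
    #include <linux/fs.h>

    static bool mm_has_exe_file(struct mm_struct *mm)
    {
            struct file *exe = get_mm_exe_file(mm);

            if (!exe)
                    return false;
            fput(exe);         /* drop the reference taken by get_mm_exe_file() */
            return true;
    }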
2513 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2514 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2519 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long st… in find_vma_intersection() argument
2521 struct vm_area_struct * vma = find_vma(mm,start_addr); in find_vma_intersection()
2558 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm, in find_exact_vma() argument
2561 struct vm_area_struct *vma = find_vma(mm, vm_start); in find_exact_vma()
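find_vma() returns the first VMA whose vm_end lies above the address, so a non-NULL result must still be checked against vm_start before concluding the address is mapped; find_vma_intersection() and find_exact_vma() above build on exactly that check. A minimal sketch, assuming a kernel recent enough to have mmap_read_lock()/mmap_read_unlock() (older trees take mm->mmap_sem directly); addr_is_mapped() is a hypothetical helper:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped = false;

            mmap_read_lock(mm);        /* find_vma() requires the mmap lock */
            vma = find_vma(mm, addr);
            if (vma && vma->vm_start <= addr)
                    mapped = true;     /* addr falls inside [vm_start, vm_end) */
            mmap_read_unlock(mm);

            return mapped;
    }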
2703 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2790 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2792 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2794 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) in get_gate_vma() argument
2799 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) in in_gate_area() argument
2805 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
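apply_to_page_range() walks every PTE in [address, address + size) and invokes a callback on each, allocating intermediate levels as needed. A minimal sketch that counts present entries, assuming the three-argument pte_fn_t used by recent kernels (older trees pass an extra pgtable_t token); both functions are hypothetical:

    #include <linux/mm.h>

    static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
    {
            if (pte_present(*pte))
                    (*(unsigned long *)data)++;
            return 0;                  /* a non-zero return aborts the walk */
    }

    static unsigned long count_present_range(struct mm_struct *mm,
                                             unsigned long addr, unsigned long size)
    {
            unsigned long nr_present = 0;

            apply_to_page_range(mm, addr, size, count_present_pte, &nr_present);
            return nr_present;
    }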