Lines Matching defs:mm
193 * per mm struct. Users can overwrite this number by sysctl but there is a
253 * The idea being to have a "virtual" mm in the same way
255 * mm details, and allowing different kinds of memory mappings
362 * support core mm.
482 /* This mask defines which mm->def_flags a process can inherit from its parent */
655 * in mm/mempolicy.c will do this automatically.
720 struct mm_struct *mm = vma->vm_mm;
727 rcuwait_wake_up(&mm->vma_writer_wait);
736 * reused and attached to a different mm before we lock it.
740 static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
752 if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
771 * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
779 if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
829 * mm->mm_lock_seq can't be concurrently modified.
907 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
913 static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
925 static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
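
As a usage sketch for the per-VMA locking entry point above: a fault path can try the lockless lookup first and fall back to mmap_lock when it fails. This assumes vma_end_read() (not among the matches here) is the matching unlock.

    struct vm_area_struct *vma;

    vma = lock_vma_under_rcu(mm, address);
    if (vma) {
            /* the VMA is read-locked and cannot be freed or resized under us */
            /* ... handle the fault against this VMA ... */
            vma_end_read(vma);
    } else {
            /* fall back to taking mmap_lock and doing a normal lookup */
    }
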
950 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
953 vma->vm_mm = mm;
1075 if (!current->mm)
1078 if (current->mm != vma->vm_mm)
1188 /* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1189 #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1475 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
2063 * should break the cow immediately for an anon page on the src mm.
2493 * drop the page in the mm, either by truncation or unmapping of the vma. By
2517 * Use the processor id as a fall-back when the mm cid feature is
2602 struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2609 extern int fixup_user_fault(struct mm_struct *mm,
2625 static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2644 static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2649 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2652 long get_user_pages_remote(struct mm_struct *mm,
2656 long pin_user_pages_remote(struct mm_struct *mm,
2664 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2676 got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2681 vma = vma_lookup(mm, addr);
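
A rough sketch of how the remote-GUP helpers above are used: the caller holds the target mm's mmap_lock for read, and references from get_user_pages_remote() are dropped with put_page() (pin_user_pages_remote() pairs with unpin_user_page() instead). addr and FOLL_WRITE are illustrative.

    struct page *page;
    long got;

    mmap_read_lock(mm);
    got = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, NULL);
    mmap_read_unlock(mm);
    if (got == 1) {
            /* ... read or modify the target page ... */
            put_page(page);
    }
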
2709 int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2710 int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2765 static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2767 return percpu_counter_read_positive(&mm->rss_stat[member]);
2770 static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
2772 return percpu_counter_sum_positive(&mm->rss_stat[member]);
2775 void mm_trace_rss_stat(struct mm_struct *mm, int member);
2777 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2779 percpu_counter_add(&mm->rss_stat[member], value);
2781 mm_trace_rss_stat(mm, member);
2784 static inline void inc_mm_counter(struct mm_struct *mm, int member)
2786 percpu_counter_inc(&mm->rss_stat[member]);
2788 mm_trace_rss_stat(mm, member);
2791 static inline void dec_mm_counter(struct mm_struct *mm, int member)
2793 percpu_counter_dec(&mm->rss_stat[member]);
2795 mm_trace_rss_stat(mm, member);
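
The RSS counters above are per-mm percpu counters indexed by MM_FILEPAGES, MM_ANONPAGES, MM_SHMEMPAGES and MM_SWAPENTS. A minimal sketch of the accounting pattern (the surrounding fault/unmap context is assumed):

    /* one anonymous page was just mapped into this mm */
    inc_mm_counter(mm, MM_ANONPAGES);

    /* ... later, when the page is unmapped again ... */
    dec_mm_counter(mm, MM_ANONPAGES);

    /* approximate (percpu, may lag) view of the anonymous RSS */
    pages = get_mm_counter(mm, MM_ANONPAGES);
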
2813 static inline unsigned long get_mm_rss(struct mm_struct *mm)
2815 return get_mm_counter(mm, MM_FILEPAGES) +
2816 get_mm_counter(mm, MM_ANONPAGES) +
2817 get_mm_counter(mm, MM_SHMEMPAGES);
2820 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2822 return max(mm->hiwater_rss, get_mm_rss(mm));
2825 static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2827 return max(mm->hiwater_vm, mm->total_vm);
2830 static inline void update_hiwater_rss(struct mm_struct *mm)
2832 unsigned long _rss = get_mm_rss(mm);
2834 if ((mm)->hiwater_rss < _rss)
2835 (mm)->hiwater_rss = _rss;
2838 static inline void update_hiwater_vm(struct mm_struct *mm)
2840 if (mm->hiwater_vm < mm->total_vm)
2841 mm->hiwater_vm = mm->total_vm;
2844 static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2846 mm->hiwater_rss = get_mm_rss(mm);
2850 struct mm_struct *mm)
2852 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
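
The high-water helpers are typically called just before a range is torn down, so the peak values reported via /proc (VmHWM, VmPeak) survive the drop in the live counters. A sketch, with the unmap itself elided:

    /* snapshot the peaks before rss_stat and total_vm shrink */
    update_hiwater_rss(mm);
    update_hiwater_vm(mm);
    /* ... unmap the range ... */
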
2901 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2903 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2907 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
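
get_locked_pte() populates any missing page-table levels and returns the PTE with its lock held, or NULL on allocation failure; the matching release is pte_unmap_unlock(). A short sketch:

    spinlock_t *ptl;
    pte_t *pte = get_locked_pte(mm, addr, &ptl);

    if (!pte)
            return -ENOMEM;
    /* ... install or inspect the entry for addr ... */
    pte_unmap_unlock(pte, ptl);
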
2912 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2918 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2922 static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2927 static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2928 static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2931 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2933 static inline void mm_inc_nr_puds(struct mm_struct *mm)
2935 if (mm_pud_folded(mm))
2937 atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2940 static inline void mm_dec_nr_puds(struct mm_struct *mm)
2942 if (mm_pud_folded(mm))
2944 atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2949 static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2955 static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2956 static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2959 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2961 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2963 if (mm_pmd_folded(mm))
2965 atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2968 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2970 if (mm_pmd_folded(mm))
2972 atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2977 static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2979 atomic_long_set(&mm->pgtables_bytes, 0);
2982 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2984 return atomic_long_read(&mm->pgtables_bytes);
2987 static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2989 atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2992 static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2994 atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2998 static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2999 static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
3004 static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
3005 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
3008 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
3013 static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
3016 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
3020 static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
3023 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
3027 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
3029 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address)) ?
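
Together these allocators implement the usual top-down walk that populates missing levels on demand, much as __get_locked_pte() does internally. A sketch of such a helper, assuming pgd_offset() from the pgtable headers:

    pgd_t *pgd = pgd_offset(mm, addr);
    p4d_t *p4d = p4d_alloc(mm, pgd, addr);
    pud_t *pud;
    pmd_t *pmd;

    if (!p4d)
            return NULL;
    pud = pud_alloc(mm, p4d, addr);
    if (!pud)
            return NULL;
    pmd = pmd_alloc(mm, pud, addr);
    if (!pmd)
            return NULL;
    /* pte_alloc_map_lock(mm, pmd, addr, &ptl) can now take the walk to PTE level */
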
3116 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3121 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3146 * We use mm->page_table_lock to guard all pagetable pages of the mm.
3148 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
3150 return &mm->page_table_lock;
3152 static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
3154 return &mm->page_table_lock;
3187 pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3189 static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3194 __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
3198 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3200 pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3209 #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3211 #define pte_alloc_map(mm, pmd, address) \
3212 (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3214 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \
3215 (pte_alloc(mm, pmd) ? \
3216 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
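
A sketch of the common pattern around pte_offset_map_lock(): a NULL return means the page table was freed or replaced concurrently and the caller should retry the walk; ptep_get() and pte_present() are assumed from the pgtable headers, and -EAGAIN is illustrative.

    spinlock_t *ptl;
    pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

    if (!pte)
            return -EAGAIN;     /* the pmd changed under us; retry at a higher level */
    if (pte_present(ptep_get(pte))) {
            /* ... operate on the mapped page ... */
    }
    pte_unmap_unlock(pte, ptl);
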
3235 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3256 #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3260 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3262 return &mm->page_table_lock;
3268 #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3272 static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3274 spinlock_t *ptl = pmd_lockptr(mm, pmd);
3306 static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3308 return &mm->page_table_lock;
3311 static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3313 spinlock_t *ptl = pud_lockptr(mm, pud);
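
pmd_lock() and pud_lock() return the (possibly split) page-table lock already taken and are released with a plain spin_unlock(); for example, stabilising a PMD before checking for a huge mapping (pmd_trans_huge() assumed from the pgtable headers):

    spinlock_t *ptl = pmd_lock(mm, pmd);

    if (pmd_trans_huge(*pmd)) {
            /* ... the huge mapping cannot change while ptl is held ... */
    }
    spin_unlock(ptl);
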
3421 /* please see mm/page_alloc.c */
3481 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3500 extern int mm_take_all_locks(struct mm_struct *mm);
3501 extern void mm_drop_all_locks(struct mm_struct *mm);
3503 extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3504 extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3505 extern struct file *get_mm_exe_file(struct mm_struct *mm);
3513 extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3539 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3543 struct mm_struct *mm, unsigned long start,
3547 extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3561 /* This takes the mm semaphore itself */
3596 struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3602 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3603 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3610 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3615 * @mm: The process address space.
3621 struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3623 return mtree_load(&mm->mm_mt, addr);
3667 static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3670 struct vm_area_struct *vma = vma_lookup(mm, vm_start);
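
The difference between the two lookups above: find_vma() returns the first VMA whose end lies above addr (it may start above addr as well), while vma_lookup() only returns a VMA that actually contains addr. A sketch, with the caller assumed to hold mmap_lock for read:

    mmap_read_lock(mm);
    vma = vma_lookup(mm, addr);
    if (vma) {
            /* addr lies inside [vma->vm_start, vma->vm_end) */
    } else {
            /* addr is unmapped; find_vma(mm, addr) would return the next VMA above it */
    }
    mmap_read_unlock(mm);
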
3813 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3815 extern int apply_to_existing_page_range(struct mm_struct *mm,
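
apply_to_page_range() walks (and, unlike apply_to_existing_page_range(), allocates) page tables across a range, invoking a pte_fn_t callback for each PTE slot. A sketch with a hypothetical callback touch_pte():

    /* hypothetical callback: invoked once per PTE slot in [addr, addr + size) */
    static int touch_pte(pte_t *pte, unsigned long addr, void *data)
    {
            return 0;
    }

    /* caller side */
    err = apply_to_page_range(mm, addr, size, touch_pte, NULL);
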
3961 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3963 extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3965 static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3970 static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3976 extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
4076 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
4323 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4328 madvise_set_anon_name(struct mm_struct *mm, unsigned long start,