/include/linux/
mmap_lock.h
    4: static inline void mmap_init_lock(struct mm_struct *mm)
    6:         init_rwsem(&mm->mmap_sem);
    9: static inline void mmap_write_lock(struct mm_struct *mm)
   11:         down_write(&mm->mmap_sem);
   14: static inline int mmap_write_lock_killable(struct mm_struct *mm)
   16:         return down_write_killable(&mm->mmap_sem);
   19: static inline bool mmap_write_trylock(struct mm_struct *mm)
   21:         return down_write_trylock(&mm->mmap_sem) != 0;
   24: static inline void mmap_write_unlock(struct mm_struct *mm)
   26:         up_write(&mm->mmap_sem);
   [all …]
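The wrappers above replace open-coded down_write(&mm->mmap_sem) calls at each call site. A minimal, hedged usage sketch (the example_ function is hypothetical; error handling limited to the killable-lock case):

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Sketch: walk and possibly modify the VMA list of @mm under the write lock. */
    static int example_touch_vmas(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            if (mmap_write_lock_killable(mm))       /* -EINTR on a fatal signal */
                    return -EINTR;

            for (vma = mm->mmap; vma; vma = vma->vm_next)
                    ;       /* VMAs may be modified here; the write lock is held */

            mmap_write_unlock(mm);
            return 0;
    }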
mmu_notifier.h
   67:         struct mm_struct *mm;
   99:                         struct mm_struct *mm);
  111:                         struct mm_struct *mm,
  121:                         struct mm_struct *mm,
  132:                         struct mm_struct *mm,
  140:                         struct mm_struct *mm,
  217:         void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
  230:         struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
  253:         struct mm_struct *mm;
  261: static inline int mm_has_notifiers(struct mm_struct *mm)
  [all …]
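The function-pointer lines at 217 and 230 belong to struct mmu_notifier_ops. A hedged sketch of how a driver that mirrors CPU page tables (an IOMMU or GPU, say) could hook in; the example_ names and the empty callback body are illustrative only:

    #include <linux/mmu_notifier.h>

    static void example_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
    {
            /* tear down any secondary-MMU mappings covering [start, end) */
    }

    static const struct mmu_notifier_ops example_ops = {
            .invalidate_range = example_invalidate_range,
    };

    static struct mmu_notifier example_mn = { .ops = &example_ops };

    static int example_attach(struct mm_struct *mm)
    {
            /* mmu_notifier_register() takes the mmap lock internally */
            return mmu_notifier_register(&example_mn, mm);
    }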
khugepaged.h
   14: extern int __khugepaged_enter(struct mm_struct *mm);
   15: extern void __khugepaged_exit(struct mm_struct *mm);
   20: extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
   22: static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
   42: static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
   45:         return __khugepaged_enter(mm);
   49: static inline void khugepaged_exit(struct mm_struct *mm)
   51:         if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
   52:                 __khugepaged_exit(mm);
   68: static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
   [all …]
ksm.h
   24: int __ksm_enter(struct mm_struct *mm);
   25: void __ksm_exit(struct mm_struct *mm);
   27: static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
   30:         return __ksm_enter(mm);
   34: static inline void ksm_exit(struct mm_struct *mm)
   36:         if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
   37:                 __ksm_exit(mm);
   59: static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
   64: static inline void ksm_exit(struct mm_struct *mm)
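khugepaged.h and ksm.h above follow the same shape: a cheap MMF_* flag test in an inline wrapper guards the heavyweight __*_enter()/__*_exit() calls. A simplified, hedged sketch of how a fork-like path and a teardown path invoke them (the real code lives in kernel/fork.c and mm/; this is not a verbatim copy):

    #include <linux/ksm.h>
    #include <linux/khugepaged.h>

    /* Sketch: per-mm hooks as a fork-style path would call them. */
    static int example_dup_hooks(struct mm_struct *mm, struct mm_struct *oldmm)
    {
            int err;

            err = ksm_fork(mm, oldmm);              /* inherit KSM participation */
            if (err)
                    return err;
            return khugepaged_fork(mm, oldmm);      /* register with khugepaged */
    }

    /* Sketch: teardown hooks; both are no-ops unless the MMF_* bit is set. */
    static void example_exit_hooks(struct mm_struct *mm)
    {
            ksm_exit(mm);
            khugepaged_exit(mm);
    }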
vmacache.h
   14: extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
   18: extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
   23: static inline void vmacache_invalidate(struct mm_struct *mm)
   25:         mm->vmacache_seqnum++;
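A hedged sketch of the intended lookup pattern: consult the per-thread cache first, fall back to a slow-path search, then refill the cache. The slow path is left as a stub here (in the kernel it is an rbtree walk of mm->mm_rb), and the refill helper vmacache_update() is assumed to come from the same header:

    #include <linux/vmacache.h>
    #include <linux/mm_types.h>

    /* Hypothetical slow path; the real one walks the mm->mm_rb rbtree. */
    struct vm_area_struct *example_slow_lookup(struct mm_struct *mm, unsigned long addr);

    /* Sketch: cached VMA lookup; the caller must hold mm's mmap lock. */
    static struct vm_area_struct *example_lookup(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            vma = vmacache_find(mm, addr);          /* O(1) per-thread cache */
            if (!vma) {
                    vma = example_slow_lookup(mm, addr);
                    if (vma)
                            vmacache_update(addr, vma);     /* refill for next time */
            }
            return vma;
    }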
pkeys.h
   11: #define execute_only_pkey(mm) (0)
   21: static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
   26: static inline int mm_pkey_alloc(struct mm_struct *mm)
   31: static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
mm.h
  551: static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
  556:         vma->vm_mm = mm;
  584: #define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
 1505: int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
 1508: int follow_pte(struct mm_struct *mm, unsigned long address,
 1528: extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 1545:                             struct mm_struct *mm, unsigned long address,
 1567: extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 1569: extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 1572: long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 [all …]
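Of the prototypes above, access_remote_vm() is the simplest entry point for touching another process's memory (it is what /proc/<pid>/mem uses). A hedged sketch with a hypothetical example_peek() helper; the caller is assumed to have obtained @mm via get_task_mm() or similar:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    /* Sketch: read @len bytes at user address @addr in the address space @mm. */
    static int example_peek(struct mm_struct *mm, unsigned long addr, void *buf, int len)
    {
            int copied;

            if (!mmget_not_zero(mm))        /* pin mm_users; fails if already exiting */
                    return -ESRCH;

            copied = access_remote_vm(mm, addr, buf, len, FOLL_FORCE);

            mmput(mm);
            return copied == len ? 0 : -EFAULT;
    }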
elf-randomize.h
   13: # define arch_randomize_brk(mm) (mm->brk)
   17: extern unsigned long arch_randomize_brk(struct mm_struct *mm);
oom.h
   84: static inline bool mm_is_oom_victim(struct mm_struct *mm)
   86:         return test_bit(MMF_OOM_VICTIM, &mm->flags);
  102: static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
  104:         if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
  109: bool __oom_reap_task_mm(struct mm_struct *mm);
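check_stable_address_space() is meant to be called, with the mmap lock held, just before installing page table entries: once the OOM reaper has marked the mm MMF_UNSTABLE, faulting in fresh pages could expose zero-filled memory where data used to be. A hedged sketch of that guard in a fault-handling-style function:

    #include <linux/mm.h>
    #include <linux/oom.h>

    /* Sketch: refuse to install PTEs into an address space the OOM reaper
     * may already have unmapped behind our back. */
    static vm_fault_t example_install_pte(struct vm_fault *vmf)
    {
            struct mm_struct *mm = vmf->vma->vm_mm;
            vm_fault_t ret;

            /* the fault handler that called us already holds the mmap lock */
            ret = check_stable_address_space(mm);
            if (ret)                /* VM_FAULT_SIGBUS when MMF_UNSTABLE is set */
                    return ret;

            /* ... safe to set up the page table entry for vmf->address here ... */
            return 0;
    }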
hugetlb.h
   91: vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  112: pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
  119: pte_t *huge_pte_alloc(struct mm_struct *mm,
  121: pte_t *huge_pte_offset(struct mm_struct *mm,
  123: int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
  126: struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
  133: struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
  135: struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
  156: static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
  169: #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
  [all …]
mm_types.h
  556: static inline void mm_init_cpumask(struct mm_struct *mm)
  558:         unsigned long cpu_bitmap = (unsigned long)mm;
  565: static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
  567:         return (struct cpumask *)&mm->cpu_bitmap;
  571: extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  576: static inline void init_tlb_flush_pending(struct mm_struct *mm)
  578:         atomic_set(&mm->tlb_flush_pending, 0);
  581: static inline void inc_tlb_flush_pending(struct mm_struct *mm)
  583:         atomic_inc(&mm->tlb_flush_pending);
  622: static inline void dec_tlb_flush_pending(struct mm_struct *mm)
  [all …]
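The tlb_flush_pending counter brackets the window between clearing PTEs and flushing the TLB, so that lockless readers (for example the fault path deciding whether a PTE change is visible everywhere) can detect an in-flight flush. A hedged sketch of the increment/flush/decrement pattern:

    #include <linux/mm.h>
    #include <linux/mm_types.h>
    #include <asm/tlbflush.h>

    /* Sketch: a range-unmap style update that publishes its pending TLB flush. */
    static void example_clear_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
    {
            struct mm_struct *mm = vma->vm_mm;

            inc_tlb_flush_pending(mm);      /* must be visible before PTEs change */

            /* ... clear or downgrade the PTEs in [start, end) under the PTL ... */

            flush_tlb_range(vma, start, end);
            dec_tlb_flush_pending(mm);      /* only once the flush has completed */
    }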
mmu_context.h
    9: void use_mm(struct mm_struct *mm);
   10: void unuse_mm(struct mm_struct *mm);
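use_mm()/unuse_mm() let a kernel thread temporarily adopt a user address space so that copy_to_user()/copy_from_user() operate on it; vhost and io_uring workers rely on this. A minimal hedged sketch (the caller is assumed to have pinned @mm with mmget()):

    #include <linux/mmu_context.h>
    #include <linux/uaccess.h>

    /* Sketch: called from a kthread; @mm must be pinned by the caller. */
    static int example_copy_to_user_mm(struct mm_struct *mm, void __user *uaddr,
                                       const void *data, size_t len)
    {
            int ret = 0;

            use_mm(mm);                     /* this kthread now runs on @mm */
            if (copy_to_user(uaddr, data, len))
                    ret = -EFAULT;
            unuse_mm(mm);                   /* drop back to the kernel's lazy mm */

            return ret;
    }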
/include/linux/sched/
mm.h
   34: static inline void mmgrab(struct mm_struct *mm)
   36:         atomic_inc(&mm->mm_count);
   39: extern void __mmdrop(struct mm_struct *mm);
   41: static inline void mmdrop(struct mm_struct *mm)
   48:         if (unlikely(atomic_dec_and_test(&mm->mm_count)))
   49:                 __mmdrop(mm);
   52: void mmdrop(struct mm_struct *mm);
   74: static inline bool mmget_still_valid(struct mm_struct *mm)
   76:         return likely(!mm->core_state);
   95: static inline void mmget(struct mm_struct *mm)
   [all …]
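Two reference counts live side by side here: mmgrab()/mmdrop() pin only struct mm_struct itself (mm_count), while mmget()/mmput() pin the whole address space (mm_users). A hedged sketch contrasting the two:

    #include <linux/sched/mm.h>

    /* Sketch: keep the mm_struct itself alive (page tables and VMAs may go away). */
    static void example_pin_struct(struct mm_struct *mm)
    {
            mmgrab(mm);             /* ++mm_count */
            /* ... safe to dereference mm's own fields here ... */
            mmdrop(mm);             /* may end up in __mmdrop() and free it */
    }

    /* Sketch: keep the full address space alive so user memory stays mapped. */
    static int example_pin_address_space(struct mm_struct *mm)
    {
            if (!mmget_not_zero(mm))        /* fails once mm_users has hit zero */
                    return -ESRCH;
            /* ... VMAs and page tables remain valid here ... */
            mmput(mm);                      /* may tear the address space down */
            return 0;
    }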
/include/drm/
drm_mm.h
  163:         struct drm_mm *mm;
  225:         struct drm_mm *mm;
  272: static inline bool drm_mm_initialized(const struct drm_mm *mm)
  274:         return mm->hole_stack.next;
  349: #define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
  359: #define drm_mm_for_each_node(entry, mm) \
  360:         list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
  371: #define drm_mm_for_each_node_safe(entry, next, mm) \
  372:         list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
  390: #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
  [all …]
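Despite the shared abbreviation, drm_mm is DRM's range allocator for GPU address space and is unrelated to struct mm_struct. A hedged sketch of the usual init / insert / iterate / teardown sequence (sizes and names are arbitrary):

    #include <drm/drm_mm.h>
    #include <linux/printk.h>
    #include <linux/sizes.h>

    /* Sketch: carve page-sized blocks out of a 1 MiB range. */
    static int example_drm_mm_demo(void)
    {
            struct drm_mm mm;
            struct drm_mm_node node = {};
            struct drm_mm_node *entry;
            int ret;

            drm_mm_init(&mm, 0, SZ_1M);             /* manage the range [0, 1M) */

            ret = drm_mm_insert_node(&mm, &node, PAGE_SIZE);
            if (ret)                                /* -ENOSPC if nothing fits */
                    goto out;

            drm_mm_for_each_node(entry, &mm)        /* iterate allocated nodes */
                    pr_info("node: start=%llx size=%llx\n", entry->start, entry->size);

            drm_mm_remove_node(&node);
    out:
            drm_mm_takedown(&mm);                   /* must be empty by now */
            return ret;
    }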
/include/asm-generic/
pgalloc.h
   19: static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
   31: static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
   33:         return __pte_alloc_one_kernel(mm);
   42: static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
   59: static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
   83: static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
   85:         return __pte_alloc_one(mm, GFP_PGTABLE_USER);
   99: static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
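These generic helpers pair page-table page allocation with freeing; architectures pull them in and wire them into their pte_alloc()/pmd_populate() paths. A hedged round-trip sketch:

    #include <linux/mm.h>
    #include <asm/pgalloc.h>

    /* Sketch: allocate one kernel PTE page and release it again. */
    static int example_pte_roundtrip(struct mm_struct *mm)
    {
            pte_t *pte = pte_alloc_one_kernel(mm);

            if (!pte)
                    return -ENOMEM;

            /* a real caller would hook it into a pmd via pmd_populate_kernel() */

            pte_free_kernel(mm, pte);
            return 0;
    }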
5level-fixup.h
   16: #define pud_alloc(mm, p4d, address) \
   17:         ((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
   20: #define p4d_alloc(mm, pgd, address) (pgd)
   43: #define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
   44: #define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
   53: #define p4d_free(mm, x) do { } while (0)
mm_hooks.h
   11:                                 struct mm_struct *mm)
   16: static inline void arch_exit_mmap(struct mm_struct *mm)
   20: static inline void arch_unmap(struct mm_struct *mm,
   25: static inline void arch_bprm_mm_init(struct mm_struct *mm,
mmu_context.h
   15: static inline void enter_lazy_tlb(struct mm_struct *mm,
   21:                                   struct mm_struct *mm)
   26: static inline void destroy_context(struct mm_struct *mm)
   31:                                  struct mm_struct *mm)
4level-fixup.h
   15: #define pmd_alloc(mm, pud, address) \
   16:         ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
   26: #define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
   32: #define pud_free(mm, x) do { } while (0)
hugetlb.h
   36: static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
   39:         pte_clear(mm, addr, ptep);
   53: static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
   56:         set_pte_at(mm, addr, ptep, pte);
   61: static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
   64:         return ptep_get_and_clear(mm, addr, ptep);
  106: static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
  109:         ptep_set_wrprotect(mm, addr, ptep);
pgtable-nop4d-hack.h
   33: #define pgd_populate(mm, pgd, pud) do { } while (0)
   34: #define pgd_populate_safe(mm, pgd, pud) do { } while (0)
   56: #define pud_alloc_one(mm, address) NULL
   57: #define pud_free(mm, x) do { } while (0)
pgtable-nopud.h
   37: #define p4d_populate(mm, p4d, pud) do { } while (0)
   38: #define p4d_populate_safe(mm, p4d, pud) do { } while (0)
   60: #define pud_alloc_one(mm, address) NULL
   61: #define pud_free(mm, x) do { } while (0)
pgtable-nop4d.h
   28: #define pgd_populate(mm, pgd, p4d) do { } while (0)
   29: #define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
   51: #define p4d_alloc_one(mm, address) NULL
   52: #define p4d_free(mm, x) do { } while (0)
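The pgtable-nop4d/nopud (and the older *-fixup) headers above fold unused page-table levels into the level above them, so one five-level walk compiles on every configuration. A hedged sketch of the canonical descent, lookup only, no allocation:

    #include <linux/mm.h>

    /* Sketch: walk @mm's page tables down to the PTE that maps @addr.
     * On architectures with folded levels the p4d/pud steps are no-ops
     * provided by the headers listed above. Caller must pte_unmap(). */
    static pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, addr);
            if (p4d_none(*p4d) || p4d_bad(*p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);
            if (pud_none(*pud) || pud_bad(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;
            return pte_offset_map(pmd, addr);
    }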
/include/trace/events/
huge_memory.h
   51:         TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
   54:         TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
   57:                 __field(struct mm_struct *, mm)
   67:                 __entry->mm = mm;
   77:                 __entry->mm,
   88:         TP_PROTO(struct mm_struct *mm, int isolated, int status),
   90:         TP_ARGS(mm, isolated, status),
   93:                 __field(struct mm_struct *, mm)
   99:                 __entry->mm = mm;
  105:                 __entry->mm,
  [all …]
xen.h
  157:         TP_PROTO(struct mm_struct *mm, unsigned long addr,
  159:         TP_ARGS(mm, addr, ptep, pteval),
  161:                 __field(struct mm_struct *, mm)
  166:         TP_fast_assign(__entry->mm = mm;
  171:                   __entry->mm, __entry->addr, __entry->ptep,
  197:         TP_PROTO(struct mm_struct *mm, unsigned long addr, pte_t *ptep),
  198:         TP_ARGS(mm, addr, ptep),
  200:                 __field(struct mm_struct *, mm)
  204:         TP_fast_assign(__entry->mm = mm;
  208:                   __entry->mm, __entry->addr, __entry->ptep)
  [all …]