Lines matching refs:vma (each entry gives the source line number, the matching line, and the enclosing symbol)
551 struct vm_area_struct *vma; /* Target VMA */ member
615 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
634 int (*access)(struct vm_area_struct *vma, unsigned long addr,
640 const char *(*name)(struct vm_area_struct *vma);
650 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
662 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
670 struct page *(*find_special_page)(struct vm_area_struct *vma,
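The callbacks above (->mprotect, ->access, ->name, ->set_policy/->get_policy,
->find_special_page) are members of struct vm_operations_struct, the per-VMA
operations table. A minimal sketch of a driver wiring up the ->name() hook so
its mapping gets a recognisable tag in /proc/<pid>/maps; the example_* names
are hypothetical, not taken from the listing:

#include <linux/mm.h>

/* Illustrative only: give the mapping a label in /proc/<pid>/maps. */
static const char *example_vma_name(struct vm_area_struct *vma)
{
        return "[example-region]";
}

static const struct vm_operations_struct example_vm_ops = {
        .name = example_vma_name,
};

/* A driver's ->mmap() handler would then set: vma->vm_ops = &example_vm_ops; */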
680 static inline void vma_numab_state_init(struct vm_area_struct *vma) in vma_numab_state_init() argument
682 vma->numab_state = NULL; in vma_numab_state_init()
684 static inline void vma_numab_state_free(struct vm_area_struct *vma) in vma_numab_state_free() argument
686 kfree(vma->numab_state); in vma_numab_state_free()
689 static inline void vma_numab_state_init(struct vm_area_struct *vma) {} in vma_numab_state_init() argument
690 static inline void vma_numab_state_free(struct vm_area_struct *vma) {} in vma_numab_state_free() argument
694 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) in vma_lock_init() argument
699 lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0); in vma_lock_init()
702 refcount_set(&vma->vm_refcnt, 0); in vma_lock_init()
703 vma->vm_lock_seq = UINT_MAX; in vma_lock_init()
717 static inline void vma_refcount_put(struct vm_area_struct *vma) in vma_refcount_put() argument
720 struct mm_struct *mm = vma->vm_mm; in vma_refcount_put()
723 rwsem_release(&vma->vmlock_dep_map, _RET_IP_); in vma_refcount_put()
724 if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) { in vma_refcount_put()
741 struct vm_area_struct *vma) in vma_start_read()
752 if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) in vma_start_read()
761 if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, in vma_start_read()
767 rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); in vma_start_read()
779 if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) { in vma_start_read()
780 vma_refcount_put(vma); in vma_start_read()
784 return vma; in vma_start_read()
793 static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) in vma_start_read_locked_nested() argument
797 mmap_assert_locked(vma->vm_mm); in vma_start_read_locked_nested()
798 if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt, in vma_start_read_locked_nested()
802 rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); in vma_start_read_locked_nested()
812 static inline bool vma_start_read_locked(struct vm_area_struct *vma) in vma_start_read_locked() argument
814 return vma_start_read_locked_nested(vma, 0); in vma_start_read_locked()
817 static inline void vma_end_read(struct vm_area_struct *vma) in vma_end_read() argument
819 vma_refcount_put(vma); in vma_end_read()
823 static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) in __is_vma_write_locked() argument
825 mmap_assert_write_locked(vma->vm_mm); in __is_vma_write_locked()
831 *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence; in __is_vma_write_locked()
832 return (vma->vm_lock_seq == *mm_lock_seq); in __is_vma_write_locked()
835 void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq);
842 static inline void vma_start_write(struct vm_area_struct *vma) in vma_start_write() argument
846 if (__is_vma_write_locked(vma, &mm_lock_seq)) in vma_start_write()
849 __vma_start_write(vma, mm_lock_seq); in vma_start_write()
852 static inline void vma_assert_write_locked(struct vm_area_struct *vma) in vma_assert_write_locked() argument
856 VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); in vma_assert_write_locked()
859 static inline void vma_assert_locked(struct vm_area_struct *vma) in vma_assert_locked() argument
863 VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 && in vma_assert_locked()
864 !__is_vma_write_locked(vma, &mm_lock_seq), vma); in vma_assert_locked()
872 static inline void vma_assert_attached(struct vm_area_struct *vma) in vma_assert_attached() argument
874 WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt)); in vma_assert_attached()
877 static inline void vma_assert_detached(struct vm_area_struct *vma) in vma_assert_detached() argument
879 WARN_ON_ONCE(refcount_read(&vma->vm_refcnt)); in vma_assert_detached()
882 static inline void vma_mark_attached(struct vm_area_struct *vma) in vma_mark_attached() argument
884 vma_assert_write_locked(vma); in vma_mark_attached()
885 vma_assert_detached(vma); in vma_mark_attached()
886 refcount_set_release(&vma->vm_refcnt, 1); in vma_mark_attached()
889 void vma_mark_detached(struct vm_area_struct *vma);
894 vma_end_read(vmf->vma); in release_fault_lock()
896 mmap_read_unlock(vmf->vma->vm_mm); in release_fault_lock()
902 vma_assert_locked(vmf->vma); in assert_fault_locked()
904 mmap_assert_locked(vmf->vma->vm_mm); in assert_fault_locked()
912 static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} in vma_lock_init() argument
914 struct vm_area_struct *vma) in vma_start_read()
916 static inline void vma_end_read(struct vm_area_struct *vma) {} in vma_end_read() argument
917 static inline void vma_start_write(struct vm_area_struct *vma) {} in vma_start_write() argument
918 static inline void vma_assert_write_locked(struct vm_area_struct *vma) in vma_assert_write_locked() argument
919 { mmap_assert_write_locked(vma->vm_mm); } in vma_assert_write_locked()
920 static inline void vma_assert_attached(struct vm_area_struct *vma) {} in vma_assert_attached() argument
921 static inline void vma_assert_detached(struct vm_area_struct *vma) {} in vma_assert_detached() argument
922 static inline void vma_mark_attached(struct vm_area_struct *vma) {} in vma_mark_attached() argument
923 static inline void vma_mark_detached(struct vm_area_struct *vma) {} in vma_mark_detached() argument
931 static inline void vma_assert_locked(struct vm_area_struct *vma) in vma_assert_locked() argument
933 mmap_assert_locked(vma->vm_mm); in vma_assert_locked()
938 mmap_read_unlock(vmf->vma->vm_mm); in release_fault_lock()
943 mmap_assert_locked(vmf->vma->vm_mm); in assert_fault_locked()
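The helpers above implement per-VMA locking (CONFIG_PER_VMA_LOCK), together
with the no-op stubs used when the option is disabled. Two hedged sketches of
the intended usage: the reader side assumes lock_vma_under_rcu(), the consumer
that fault handlers normally use but which is not among the lines listed here;
the example_* functions are hypothetical.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/*
 * Reader side (sketch): pin one VMA under the per-VMA read lock without
 * taking mmap_lock, read what we need, then drop it with vma_end_read().
 */
static bool example_vma_is_anon_at(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool anon;

        vma = lock_vma_under_rcu(mm, addr);
        if (!vma)
                return false;   /* caller would fall back to mmap_read_lock() */

        anon = vma_is_anonymous(vma);
        vma_end_read(vma);
        return anon;
}

/*
 * Writer side (sketch): a VMA may only be modified with mmap_lock held for
 * write and after vma_start_write(), which excludes concurrent readers.
 */
static void example_modify_vma(struct vm_area_struct *vma)
{
        mmap_assert_write_locked(vma->vm_mm);
        vma_start_write(vma);
        /* ... update the VMA's fields ... */
}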
950 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) in vma_init() argument
952 memset(vma, 0, sizeof(*vma)); in vma_init()
953 vma->vm_mm = mm; in vma_init()
954 vma->vm_ops = &vma_dummy_vm_ops; in vma_init()
955 INIT_LIST_HEAD(&vma->anon_vma_chain); in vma_init()
956 vma_lock_init(vma, false); in vma_init()
960 static inline void vm_flags_init(struct vm_area_struct *vma, in vm_flags_init() argument
963 ACCESS_PRIVATE(vma, __vm_flags) = flags; in vm_flags_init()
971 static inline void vm_flags_reset(struct vm_area_struct *vma, in vm_flags_reset() argument
974 vma_assert_write_locked(vma); in vm_flags_reset()
976 flags = vma_pad_fixup_flags(vma, flags); in vm_flags_reset()
977 vm_flags_init(vma, flags); in vm_flags_reset()
980 static inline void vm_flags_reset_once(struct vm_area_struct *vma, in vm_flags_reset_once() argument
983 vma_assert_write_locked(vma); in vm_flags_reset_once()
985 flags = vma_pad_fixup_flags(vma, flags); in vm_flags_reset_once()
986 WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); in vm_flags_reset_once()
989 static inline void vm_flags_set(struct vm_area_struct *vma, in vm_flags_set() argument
992 vma_start_write(vma); in vm_flags_set()
993 ACCESS_PRIVATE(vma, __vm_flags) |= flags; in vm_flags_set()
996 static inline void vm_flags_clear(struct vm_area_struct *vma, in vm_flags_clear() argument
999 vma_start_write(vma); in vm_flags_clear()
1000 ACCESS_PRIVATE(vma, __vm_flags) &= ~flags; in vm_flags_clear()
1007 static inline void __vm_flags_mod(struct vm_area_struct *vma, in __vm_flags_mod() argument
1010 vm_flags_init(vma, (vma->vm_flags | set) & ~clear); in __vm_flags_mod()
1017 static inline void vm_flags_mod(struct vm_area_struct *vma, in vm_flags_mod() argument
1020 vma_start_write(vma); in vm_flags_mod()
1021 __vm_flags_mod(vma, set, clear); in vm_flags_mod()
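The vm_flags_*() helpers above exist because vma->vm_flags is otherwise const
to readers; writers must go through accessors that assert or take the per-VMA
write lock, as the vma_start_write()/vma_assert_write_locked() calls above
show. A hedged sketch of the intended split between them; the example_*
function and the particular flags chosen are illustrative only:

#include <linux/mm.h>

static void example_vm_flags_usage(struct vm_area_struct *new_vma,
                                   struct vm_area_struct *vma)
{
        /* Freshly created VMA, not yet visible to readers: plain init. */
        vm_flags_init(new_vma, VM_READ | VM_MAYREAD);

        /* Published VMA: these helpers write-lock it before touching flags. */
        vm_flags_set(vma, VM_LOCKED);
        vm_flags_clear(vma, VM_LOCKED);
        vm_flags_mod(vma, VM_DONTCOPY, VM_LOCKED);      /* set one, clear one */
}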
1024 static inline void vma_set_anonymous(struct vm_area_struct *vma) in vma_set_anonymous() argument
1026 vma->vm_ops = NULL; in vma_set_anonymous()
1029 static inline bool vma_is_anonymous(struct vm_area_struct *vma) in vma_is_anonymous() argument
1031 return !vma->vm_ops; in vma_is_anonymous()
1038 static inline bool vma_is_initial_heap(const struct vm_area_struct *vma) in vma_is_initial_heap() argument
1040 return vma->vm_start < vma->vm_mm->brk && in vma_is_initial_heap()
1041 vma->vm_end > vma->vm_mm->start_brk; in vma_is_initial_heap()
1048 static inline bool vma_is_initial_stack(const struct vm_area_struct *vma) in vma_is_initial_stack() argument
1055 return vma->vm_start <= vma->vm_mm->start_stack && in vma_is_initial_stack()
1056 vma->vm_end >= vma->vm_mm->start_stack; in vma_is_initial_stack()
1059 static inline bool vma_is_temporary_stack(struct vm_area_struct *vma) in vma_is_temporary_stack() argument
1061 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in vma_is_temporary_stack()
1066 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in vma_is_temporary_stack()
1073 static inline bool vma_is_foreign(struct vm_area_struct *vma) in vma_is_foreign() argument
1078 if (current->mm != vma->vm_mm) in vma_is_foreign()
1084 static inline bool vma_is_accessible(struct vm_area_struct *vma) in vma_is_accessible() argument
1086 return vma->vm_flags & VM_ACCESS_FLAGS; in vma_is_accessible()
1095 static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma) in vma_is_shared_maywrite() argument
1097 return is_shared_maywrite(vma->vm_flags); in vma_is_shared_maywrite()
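The predicates above classify a VMA by role: anonymous vs. file-backed, the
initial process heap or stack, accessibility, shared-writable mappings. A
hedged sketch of combining them, roughly in the spirit of /proc reporting
code; example_classify_vma() is hypothetical:

#include <linux/mm.h>

static const char *example_classify_vma(struct vm_area_struct *vma)
{
        if (vma_is_initial_stack(vma))
                return "stack";
        if (vma_is_initial_heap(vma))
                return "heap";
        if (vma_is_anonymous(vma))
                return "anon";
        if (vma_is_shared_maywrite(vma))
                return "shared file (writable)";
        return "file";
}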
1145 struct vm_area_struct *vma) in vma_iter_bulk_store() argument
1147 vmi->mas.index = vma->vm_start; in vma_iter_bulk_store()
1148 vmi->mas.last = vma->vm_end - 1; in vma_iter_bulk_store()
1149 mas_store(&vmi->mas, vma); in vma_iter_bulk_store()
1153 vma_mark_attached(vma); in vma_iter_bulk_store()
1179 bool vma_is_shmem(struct vm_area_struct *vma);
1180 bool vma_is_anon_shmem(struct vm_area_struct *vma);
1182 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } in vma_is_shmem() argument
1183 static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; } in vma_is_anon_shmem() argument
1186 int vma_is_stack_for_current(struct vm_area_struct *vma);
1448 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) in maybe_mkwrite() argument
1450 if (likely(vma->vm_flags & VM_WRITE)) in maybe_mkwrite()
1451 pte = pte_mkwrite(pte, vma); in maybe_mkwrite()
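maybe_mkwrite() sets the hardware write bit only when the VMA itself allows
writes (VM_WRITE). A sketch of the usual construction of a PTE for a new
anonymous page, mirroring what the anonymous-fault path does; the helper name
is hypothetical:

#include <linux/mm.h>

static pte_t example_make_anon_pte(struct page *page,
                                   struct vm_area_struct *vma)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        /* Dirty the PTE; mark it writable only if VM_WRITE is set. */
        return maybe_mkwrite(pte_mkdirty(entry), vma);
}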
1843 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) in vma_set_access_pid_bit() argument
1848 if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) { in vma_set_access_pid_bit()
1849 __set_bit(pid_bit, &vma->numab_state->pids_active[1]); in vma_set_access_pid_bit()
1904 static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) in vma_set_access_pid_bit() argument
2067 static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma, in folio_needs_cow_for_dma() argument
2070 VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); in folio_needs_cow_for_dma()
2072 if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) in folio_needs_cow_for_dma()
2533 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2535 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2537 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2539 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2542 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2544 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2546 static inline void zap_vma_pages(struct vm_area_struct *vma) in zap_vma_pages() argument
2548 zap_page_range_single(vma, vma->vm_start, in zap_vma_pages()
2549 vma->vm_end - vma->vm_start, NULL); in zap_vma_pages()
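zap_vma_ptes() and zap_page_range_single() let a driver (or core code) tear
down the page-table entries covering part of a user mapping, and
zap_vma_pages() above is the whole-VMA convenience wrapper. A hedged sketch of
a driver revoking a mapping it previously populated; example_revoke_mapping()
is hypothetical and assumes the caller holds the locks appropriate for the VMA:

#include <linux/mm.h>

static void example_revoke_mapping(struct vm_area_struct *vma)
{
        /* Drop every PTE in the VMA; equivalent to zap_vma_pages(vma). */
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}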
2561 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2570 struct vm_area_struct *vma; member
2606 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2617 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, in handle_mm_fault() argument
2670 struct vm_area_struct *vma; in get_user_page_vma_remote() local
2681 vma = vma_lookup(mm, addr); in get_user_page_vma_remote()
2682 if (WARN_ON_ONCE(!vma)) { in get_user_page_vma_remote()
2687 *vmap = vma; in get_user_page_vma_remote()
2742 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2745 struct vm_area_struct *vma, unsigned long start,
2748 struct vm_area_struct *vma, struct vm_area_struct **pprev,
3459 #define vma_interval_tree_foreach(vma, root, start, last) \ argument
3460 for (vma = vma_interval_tree_iter_first(root, start, last); \
3461 vma; vma = vma_interval_tree_iter_next(vma, start, last))
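vma_interval_tree_foreach() walks every VMA that maps a given range of a file
through the address_space's i_mmap interval tree; the range is given in page
offsets. A sketch under the usual assumption that the caller takes
i_mmap_lock_read(); example_count_mappers() is hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static int example_count_mappers(struct address_space *mapping,
                                 pgoff_t first, pgoff_t last)
{
        struct vm_area_struct *vma;
        int count = 0;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
                count++;
        i_mmap_unlock_read(mapping);

        return count;
}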
3484 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
3511 extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3542 int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3595 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3599 int expand_downwards(struct vm_area_struct *vma, unsigned long address);
3626 static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) in stack_guard_start_gap() argument
3628 if (vma->vm_flags & VM_GROWSDOWN) in stack_guard_start_gap()
3632 if (vma->vm_flags & VM_SHADOW_STACK) in stack_guard_start_gap()
3638 static inline unsigned long vm_start_gap(struct vm_area_struct *vma) in vm_start_gap() argument
3640 unsigned long gap = stack_guard_start_gap(vma); in vm_start_gap()
3641 unsigned long vm_start = vma->vm_start; in vm_start_gap()
3644 if (vm_start > vma->vm_start) in vm_start_gap()
3649 static inline unsigned long vm_end_gap(struct vm_area_struct *vma) in vm_end_gap() argument
3651 unsigned long vm_end = vma->vm_end; in vm_end_gap()
3653 if (vma->vm_flags & VM_GROWSUP) { in vm_end_gap()
3655 if (vm_end < vma->vm_end) in vm_end_gap()
3661 static inline unsigned long vma_pages(struct vm_area_struct *vma) in vma_pages() argument
3663 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; in vma_pages()
3670 struct vm_area_struct *vma = vma_lookup(mm, vm_start); in find_exact_vma() local
3672 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) in find_exact_vma()
3673 vma = NULL; in find_exact_vma()
3675 return vma; in find_exact_vma()
3678 static inline bool range_in_vma(struct vm_area_struct *vma, in range_in_vma() argument
3681 return (vma && vma->vm_start <= start && end <= vma->vm_end); in range_in_vma()
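The helpers above answer geometry questions about a VMA: vm_start_gap() and
vm_end_gap() fold in the stack guard gap, vma_pages() gives the size in pages,
and find_exact_vma()/range_in_vma() test containment. A hedged sketch that
combines vma_lookup() with range_in_vma() and vma_pages(); the helper is
hypothetical and assumes mmap_lock is held for read:

#include <linux/errno.h>
#include <linux/mm.h>

static long example_vma_span_pages(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        struct vm_area_struct *vma = vma_lookup(mm, start);

        /* range_in_vma() tolerates a NULL vma and returns false. */
        if (!range_in_vma(vma, start, end))
                return -EFAULT;

        return vma_pages(vma);
}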
3686 void vma_set_page_prot(struct vm_area_struct *vma);
3692 static inline void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
3694 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in vma_set_page_prot()
3698 void vma_set_file(struct vm_area_struct *vma, struct file *file);
3701 unsigned long change_prot_numa(struct vm_area_struct *vma,
3709 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3712 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3714 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3716 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3718 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3720 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3722 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3724 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3726 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3728 static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, in vmf_insert_page() argument
3731 int err = vm_insert_page(vma, addr, page); in vmf_insert_page()
3742 static inline int io_remap_pfn_range(struct vm_area_struct *vma, in io_remap_pfn_range() argument
3746 return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); in io_remap_pfn_range()
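remap_pfn_range() and io_remap_pfn_range() are the usual way for a character
driver's ->mmap() handler to map physical or device memory into the VMA it is
handed. A sketch of the classic pattern; example_mmap() and EXAMPLE_PHYS_BASE
are hypothetical stand-ins (a real driver would take the base from its
resource or BAR):

#include <linux/fs.h>
#include <linux/mm.h>

#define EXAMPLE_PHYS_BASE       0x80000000UL

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = (EXAMPLE_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

        /* Device memory: avoid caching on the CPU side. */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, pfn, size,
                               vma->vm_page_prot);
}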
3792 static inline bool gup_can_follow_protnone(struct vm_area_struct *vma, in gup_can_follow_protnone() argument
3809 return !vma_is_accessible(vma); in gup_can_follow_protnone()
3992 const char * arch_vma_name(struct vm_area_struct *vma);
4210 struct vm_area_struct *vma);
4225 static inline bool vma_is_special_huge(const struct vm_area_struct *vma) in vma_is_special_huge() argument
4227 return vma_is_dax(vma) || (vma->vm_file && in vma_is_special_huge()
4228 (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))); in vma_is_special_huge()
4307 static inline int seal_check_write(int seals, struct vm_area_struct *vma) in seal_check_write() argument
4316 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) in seal_check_write()
4358 void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4359 void vma_pgtable_walk_end(struct vm_area_struct *vma);
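seal_check_write() above is intended to be called from the ->mmap() handler of
a memfd-style file so that write seals can refuse or downgrade shared writable
mappings before they are created. A hedged sketch; example_sealed_mmap() is
hypothetical and "seals" would normally come from the file's inode-private
information:

#include <linux/mm.h>

static int example_sealed_mmap(int seals, struct vm_area_struct *vma)
{
        int ret = seal_check_write(seals, vma);

        if (ret)
                return ret;     /* MAP_SHARED + PROT_WRITE refused by seals */

        /* ... continue with the normal mmap setup ... */
        return 0;
}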