/mm/

mmap.c
    81: struct vm_area_struct *vma, struct vm_area_struct *prev,
    126: void vma_set_page_prot(struct vm_area_struct *vma)   in vma_set_page_prot()
    143: static void __remove_shared_vm_struct(struct vm_area_struct *vma,   in __remove_shared_vm_struct()
    160: void unlink_file_vma(struct vm_area_struct *vma)   in unlink_file_vma()
    172: static void __free_vma(struct vm_area_struct *vma)   in __free_vma()
    181: void put_vma(struct vm_area_struct *vma)   in put_vma()
    187: static inline void put_vma(struct vm_area_struct *vma)   in put_vma()
    196: static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)   in remove_vma()
    198: struct vm_area_struct *next = vma->vm_next;   in remove_vma()
    214: struct vm_area_struct *next;   in SYSCALL_DEFINE1()
    [all …]

mremap.c
    70: static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,   in alloc_new_pud()
    84: static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,   in alloc_new_pmd()
    103: static void take_rmap_locks(struct vm_area_struct *vma)   in take_rmap_locks()
    111: static void drop_rmap_locks(struct vm_area_struct *vma)   in drop_rmap_locks()
    134: static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,   in move_ptes()
    136: struct vm_area_struct *new_vma, pmd_t *new_pmd,   in move_ptes()
    214: static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)   in trylock_vma_ref_count()
    226: static inline void unlock_vma_ref_count(struct vm_area_struct *vma)   in unlock_vma_ref_count()
    237: static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)   in trylock_vma_ref_count()
    241: static inline void unlock_vma_ref_count(struct vm_area_struct *vma)   in unlock_vma_ref_count()
    [all …]

internal.h
    40: extern struct vm_area_struct *get_vma(struct mm_struct *mm,
    42: extern void put_vma(struct vm_area_struct *vma);
    59: void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    62: static inline bool can_madv_lru_vma(struct vm_area_struct *vma)   in can_madv_lru_vma()
    68: struct vm_area_struct *vma,
    351: void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
    352: struct vm_area_struct *prev);
    353: void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
    356: extern long populate_vma_page_range(struct vm_area_struct *vma,
    358: extern void munlock_vma_pages_range(struct vm_area_struct *vma,
    [all …]

interval_tree.c
    13: static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)   in vma_start_pgoff()
    18: static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)   in vma_last_pgoff()
    23: INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
    28: void vma_interval_tree_insert_after(struct vm_area_struct *node,   in vma_interval_tree_insert_after()
    29: struct vm_area_struct *prev,   in vma_interval_tree_insert_after()
    33: struct vm_area_struct *parent;   in vma_interval_tree_insert_after()
    43: struct vm_area_struct, shared.rb);   in vma_interval_tree_insert_after()
    48: struct vm_area_struct, shared.rb);   in vma_interval_tree_insert_after()

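The two helpers listed above key the shared-file VMA interval tree on a closed page-offset interval derived from vm_pgoff and the VMA's length; INTERVAL_TREE_DEFINE then generates the typed insert/iterate functions from those accessors. Below is a minimal user-space sketch of just that arithmetic, assuming 4 KiB pages and the usual vm_pgoff meaning (offset into the backing file, in pages); the struct and names are hypothetical stand-ins, not the kernel's.

#include <stdio.h>

#define PAGE_SHIFT 12                    /* assumption: 4 KiB pages */

/* Hypothetical stand-in for the three vm_area_struct fields the helpers read. */
struct toy_vma {
	unsigned long vm_start, vm_end;  /* virtual range, vm_end exclusive */
	unsigned long vm_pgoff;          /* offset into the mapped file, in pages */
};

/* First and last file page covered by the mapping (a closed interval). */
static unsigned long start_pgoff(const struct toy_vma *v)
{
	return v->vm_pgoff;
}

static unsigned long last_pgoff(const struct toy_vma *v)
{
	return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}

int main(void)
{
	/* A 4-page mapping of file pages 16..19. */
	struct toy_vma v = { 0x7f0000000000UL, 0x7f0000004000UL, 16 };
	unsigned long qstart = 18, qlast = 25;  /* query interval, in file pages */

	int overlap = start_pgoff(&v) <= qlast && last_pgoff(&v) >= qstart;
	printf("vma spans file pages %lu..%lu; overlaps [%lu,%lu]: %s\n",
	       start_pgoff(&v), last_pgoff(&v), qstart, qlast,
	       overlap ? "yes" : "no");
	return 0;
}

The point of keeping these intervals in a tree is that reverse mapping can find every VMA whose file-page interval overlaps a given page in logarithmic time; the overlap test above is what the tree applies per node.
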
madvise.c
    69: static long madvise_behavior(struct vm_area_struct *vma,   in madvise_behavior()
    70: struct vm_area_struct **prev,   in madvise_behavior()
    193: struct vm_area_struct *vma = walk->private;   in swapin_walk_pmd_entry()
    228: static void force_shm_swapin_readahead(struct vm_area_struct *vma,   in force_shm_swapin_readahead()
    262: static long madvise_willneed(struct vm_area_struct *vma,   in madvise_willneed()
    263: struct vm_area_struct **prev,   in madvise_willneed()
    319: struct vm_area_struct *vma = walk->vma;   in madvise_cold_or_pageout_pte_range()
    511: struct vm_area_struct *vma,   in madvise_cold_page_range()
    524: static long madvise_cold(struct vm_area_struct *vma,   in madvise_cold()
    525: struct vm_area_struct **prev,   in madvise_cold()
    [all …]

nommu.c
    100: struct vm_area_struct *vma;   in kobjsize()
    124: int follow_pfn(struct vm_area_struct *vma, unsigned long address,   in follow_pfn()
    173: struct vm_area_struct *vma;   in __vmalloc_user_flags()
    363: int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,   in vm_insert_page()
    370: int vm_map_pages(struct vm_area_struct *vma, struct page **pages,   in vm_map_pages()
    377: int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,   in vm_map_pages_zero()
    563: static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)   in add_vma_to_mm()
    565: struct vm_area_struct *pvma, *prev;   in add_vma_to_mm()
    590: pvma = rb_entry(parent, struct vm_area_struct, vm_rb);   in add_vma_to_mm()
    619: prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);   in add_vma_to_mm()
    [all …]

pgtable-generic.c
    64: int ptep_set_access_flags(struct vm_area_struct *vma,   in ptep_set_access_flags()
    78: int ptep_clear_flush_young(struct vm_area_struct *vma,   in ptep_clear_flush_young()
    90: pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,   in ptep_clear_flush()
    105: int pmdp_set_access_flags(struct vm_area_struct *vma,   in pmdp_set_access_flags()
    120: int pmdp_clear_flush_young(struct vm_area_struct *vma,   in pmdp_clear_flush_young()
    133: pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,   in pmdp_huge_clear_flush()
    146: pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,   in pudp_huge_clear_flush()
    194: pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,   in pmdp_invalidate()
    204: pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,   in pmdp_collapse_flush()

memory.c
    430: void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,   in free_pgtables()
    434: struct vm_area_struct *next = vma->vm_next;   in free_pgtables()
    545: static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,   in print_bad_pte()
    639: struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,   in _vm_normal_page()
    699: struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,   in vm_normal_page_pmd()
    748: pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,   in copy_nonpresent_pte()
    749: struct vm_area_struct *src_vma, unsigned long addr, int *rss)   in copy_nonpresent_pte()
    847: copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,   in copy_present_page()
    916: copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,   in copy_present_pte()
    964: page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,   in page_copy_prealloc()
    [all …]

rmap.c
    147: static void anon_vma_chain_link(struct vm_area_struct *vma,   in anon_vma_chain_link()
    185: int __anon_vma_prepare(struct vm_area_struct *vma)   in __anon_vma_prepare()
    277: int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)   in anon_vma_clone()
    331: int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)   in anon_vma_fork()
    393: void unlink_anon_vmas(struct vm_area_struct *vma)   in unlink_anon_vmas()
    721: unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)   in page_address_in_vma()
    784: static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,   in page_referenced_one()
    848: static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)   in invalid_page_referenced_vma()
    920: static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,   in page_mkclean_one()
    994: static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)   in invalid_mkclean_vma()
    [all …]

vmacache.c
    35: void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)   in vmacache_update()
    61: struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)   in vmacache_find()
    72: struct vm_area_struct *vma = current->vmacache.vmas[idx];   in vmacache_find()
    92: struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,   in vmacache_find_exact()
    105: struct vm_area_struct *vma = current->vmacache.vmas[idx];   in vmacache_find_exact()

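As the current->vmacache.vmas[idx] accesses above suggest, vmacache is a tiny per-task array of recently used VMAs, indexed by a hash of the lookup address and consulted before the full per-mm lookup. The following is a user-space sketch of that idea only, assuming 4 KiB pages and a four-slot cache; the slot count, names, and struct layout are illustrative assumptions, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT    12  /* assumption: 4 KiB pages */
#define VMACACHE_SIZE 4   /* assumption: a small power-of-two number of slots */

struct toy_vma { unsigned long vm_start, vm_end; };   /* vm_end exclusive */

/* Per-thread cache of the last few VMAs that lookups resolved to. */
struct toy_vmacache { struct toy_vma *vmas[VMACACHE_SIZE]; };

static size_t slot(unsigned long addr)
{
	return (addr >> PAGE_SHIFT) & (VMACACHE_SIZE - 1);
}

static void cache_update(struct toy_vmacache *c, unsigned long addr, struct toy_vma *vma)
{
	c->vmas[slot(addr)] = vma;   /* remember the hit for nearby future lookups */
}

static struct toy_vma *cache_find(struct toy_vmacache *c, unsigned long addr)
{
	/* The cache is tiny, so just scan every slot for a covering VMA. */
	for (size_t i = 0; i < VMACACHE_SIZE; i++) {
		struct toy_vma *vma = c->vmas[i];
		if (vma && vma->vm_start <= addr && addr < vma->vm_end)
			return vma;
	}
	return NULL;   /* miss: the caller falls back to the full VMA lookup */
}

int main(void)
{
	struct toy_vma a = { 0x1000, 0x5000 };
	struct toy_vmacache cache = { { 0 } };

	cache_update(&cache, 0x2000, &a);
	printf("hit 0x2abc: %s\n", cache_find(&cache, 0x2abc) ? "yes" : "no");
	printf("hit 0x9000: %s\n", cache_find(&cache, 0x9000) ? "yes" : "no");
	return 0;
}
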
hugetlb.c
    81: static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
    243: static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)   in subpool_vma()
    802: struct vm_area_struct *vma, unsigned long address)   in vma_hugecache_offset()
    808: pgoff_t linear_hugepage_index(struct vm_area_struct *vma,   in linear_hugepage_index()
    819: unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)   in vma_kernel_pagesize()
    833: __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)   in vma_mmu_pagesize()
    866: static unsigned long get_vma_private_data(struct vm_area_struct *vma)   in get_vma_private_data()
    871: static void set_vma_private_data(struct vm_area_struct *vma,   in set_vma_private_data()
    960: static struct resv_map *vma_resv_map(struct vm_area_struct *vma)   in vma_resv_map()
    975: static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)   in set_vma_resv_map()
    [all …]

gup.c
    374: static struct page *no_page_table(struct vm_area_struct *vma,   in no_page_table()
    391: static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,   in follow_pfn_pte()
    425: static struct page *follow_page_pte(struct vm_area_struct *vma,   in follow_page_pte()
    587: static struct page *follow_pmd_mask(struct vm_area_struct *vma,   in follow_pmd_mask()
    702: static struct page *follow_pud_mask(struct vm_area_struct *vma,   in follow_pud_mask()
    742: static struct page *follow_p4d_mask(struct vm_area_struct *vma,   in follow_p4d_mask()
    787: static struct page *follow_page_mask(struct vm_area_struct *vma,   in follow_page_mask()
    827: struct page *follow_page(struct vm_area_struct *vma, unsigned long address,   in follow_page()
    840: unsigned int gup_flags, struct vm_area_struct **vma,   in get_gate_page()
    897: static int faultin_page(struct vm_area_struct *vma,   in faultin_page()
    [all …]

huge_memory.c
    66: static inline bool file_thp_enabled(struct vm_area_struct *vma)   in file_thp_enabled()
    73: bool transparent_hugepage_active(struct vm_area_struct *vma)   in transparent_hugepage_active()
    479: pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)   in maybe_pmd_mkwrite()
    584: struct vm_area_struct *vma = vmf->vma;   in __do_huge_pmd_anonymous_page()
    668: static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)   in alloc_hugepage_direct_gfpmask()
    696: struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,   in set_huge_zero_page()
    713: struct vm_area_struct *vma = vmf->vma;   in do_huge_pmd_anonymous_page()
    772: static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,   in insert_pfn_pmd()
    836: struct vm_area_struct *vma = vmf->vma;   in vmf_insert_pfn_pmd_prot()
    867: static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)   in maybe_pud_mkwrite()
    [all …]

userfaultfd.c
    22: struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,   in find_dst_vma()
    30: struct vm_area_struct *dst_vma;   in find_dst_vma()
    58: struct vm_area_struct *dst_vma,   in mfill_atomic_install_pte()
    125: struct vm_area_struct *dst_vma,   in mcopy_atomic_pte()
    185: struct vm_area_struct *dst_vma,   in mfill_zeropage_pte()
    221: struct vm_area_struct *dst_vma,   in mcontinue_atomic_pte()
    280: struct vm_area_struct *dst_vma,   in __mcopy_atomic_hugetlb()
    489: struct vm_area_struct *dst_vma,
    499: struct vm_area_struct *dst_vma,   in mfill_atomic_pte()
    550: struct vm_area_struct *dst_vma;   in __mcopy_atomic()
    [all …]

mprotect.c
    38: static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,   in change_pte_range()
    212: static inline unsigned long change_pmd_range(struct vm_area_struct *vma,   in change_pmd_range()
    284: static inline unsigned long change_pud_range(struct vm_area_struct *vma,   in change_pud_range()
    304: static inline unsigned long change_p4d_range(struct vm_area_struct *vma,   in change_p4d_range()
    324: static unsigned long change_protection_range(struct vm_area_struct *vma,   in change_protection_range()
    354: unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,   in change_protection()
    399: mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,   in mprotect_fixup()
    518: struct vm_area_struct *vma, *prev;   in do_mprotect_pkey()

mempolicy.c
    406: struct vm_area_struct *vma;   in mpol_rebind_mm()
    444: struct vm_area_struct *first;
    525: struct vm_area_struct *vma = walk->vma;   in queue_pages_pte_range()
    656: unsigned long change_prot_numa(struct vm_area_struct *vma,   in change_prot_numa()
    668: static unsigned long change_prot_numa(struct vm_area_struct *vma,   in change_prot_numa()
    678: struct vm_area_struct *vma = walk->vma;   in queue_pages_test_walk()
    773: static int vma_replace_policy(struct vm_area_struct *vma,   in vma_replace_policy()
    816: struct vm_area_struct *prev;   in mbind_range()
    817: struct vm_area_struct *vma;   in mbind_range()
    950: struct vm_area_struct *vma = NULL;   in do_get_mempolicy()
    [all …]

mincore.c
    72: struct vm_area_struct *vma, unsigned char *vec)   in __mincore_unmapped_range()
    103: struct vm_area_struct *vma = walk->vma;   in mincore_pte_range()
    157: static inline bool can_do_mincore(struct vm_area_struct *vma)   in can_do_mincore()
    186: struct vm_area_struct *vma;   in do_mincore()

mlock.c
    381: struct vm_area_struct *vma, struct zone *zone,   in __munlock_pagevec_fill()
    451: void munlock_vma_pages_range(struct vm_area_struct *vma,   in munlock_vma_pages_range()
    534: static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,   in mlock_fixup()
    603: struct vm_area_struct * vma, * prev;   in apply_vma_lock_flags()
    658: struct vm_area_struct *vma;   in count_mm_mlocked_page_nr()
    781: struct vm_area_struct * vma, * prev = NULL;   in apply_mlockall_flags()

pagewalk.c
    248: struct vm_area_struct *vma = walk->vma;   in walk_hugetlb_range()
    291: struct vm_area_struct *vma = walk->vma;   in walk_page_test()
    318: struct vm_area_struct *vma = walk->vma;   in __walk_page_range()
    385: struct vm_area_struct *vma;   in walk_page_range()
    465: int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,   in walk_page_vma()
    527: struct vm_area_struct *vma;   in walk_page_mapping()

swap_state.c
    370: struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,   in lookup_swap_cache()
    453: struct vm_area_struct *vma, unsigned long addr,   in __read_swap_cache_async()
    559: struct vm_area_struct *vma, unsigned long addr, bool do_poll)   in read_swap_cache_async()
    665: struct vm_area_struct *vma = vmf->vma;   in swap_cluster_readahead()
    742: static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,   in swap_ra_clamp_pfn()
    758: struct vm_area_struct *vma = vmf->vma;   in swap_ra_info()
    841: struct vm_area_struct *vma = vmf->vma;   in swap_vma_readahead()

khugepaged.c
    345: int hugepage_madvise(struct vm_area_struct *vma,   in hugepage_madvise()
    440: static bool hugepage_vma_check(struct vm_area_struct *vma,   in hugepage_vma_check()
    507: int khugepaged_enter_vma_merge(struct vm_area_struct *vma,   in khugepaged_enter_vma_merge()
    599: static int __collapse_huge_page_isolate(struct vm_area_struct *vma,   in __collapse_huge_page_isolate()
    742: struct vm_area_struct *vma,   in __collapse_huge_page_copy()
    974: struct vm_area_struct **vmap)   in hugepage_vma_revalidate()
    976: struct vm_area_struct *vma;   in hugepage_vma_revalidate()
    1007: struct vm_area_struct *vma,   in __collapse_huge_page_swapin()
    1075: struct vm_area_struct *vma;   in collapse_huge_page()
    1236: struct vm_area_struct *vma,   in khugepaged_scan_pmd()
    [all …]

ksm.c
    470: static int break_ksm(struct vm_area_struct *vma, unsigned long addr)   in break_ksm()
    520: static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,   in find_mergeable_vma()
    523: struct vm_area_struct *vma;   in find_mergeable_vma()
    538: struct vm_area_struct *vma;   in break_cow()
    557: struct vm_area_struct *vma;   in get_mergeable_page()
    845: static int unmerge_ksm_pages(struct vm_area_struct *vma,   in unmerge_ksm_pages()
    970: struct vm_area_struct *vma;   in unmerge_and_remove_all_rmap_items()
    1034: static int write_protect_page(struct vm_area_struct *vma, struct page *page,   in write_protect_page()
    1121: static int replace_page(struct vm_area_struct *vma, struct page *page,   in replace_page()
    1203: static int try_to_merge_one_page(struct vm_area_struct *vma,   in try_to_merge_one_page()
    [all …]

util.c
    278: void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,   in __vma_link_list()
    279: struct vm_area_struct *prev)   in __vma_link_list()
    281: struct vm_area_struct *next;   in __vma_link_list()
    296: void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)   in __vma_unlink_list()
    298: struct vm_area_struct *prev, *next;   in __vma_unlink_list()
    311: int vma_is_stack_for_current(struct vm_area_struct *vma)   in vma_is_stack_for_current()

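__vma_link_list() and __vma_unlink_list() maintain the mm's address-ordered doubly linked list of VMAs (vm_next/vm_prev, with mm->mmap as the head). Below is a minimal user-space sketch of the same splice-after-prev operation; the struct names are hypothetical stand-ins, not the kernel's types.

#include <assert.h>
#include <stddef.h>

struct toy_vma {
	unsigned long vm_start, vm_end;
	struct toy_vma *vm_next, *vm_prev;
};

struct toy_mm {
	struct toy_vma *mmap;   /* head of the address-ordered VMA list */
};

/* Insert vma right after prev; a NULL prev makes vma the new list head. */
static void vma_link_list(struct toy_mm *mm, struct toy_vma *vma, struct toy_vma *prev)
{
	struct toy_vma *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

int main(void)
{
	struct toy_mm mm = { NULL };
	struct toy_vma a = { 0x1000, 0x2000, NULL, NULL };
	struct toy_vma b = { 0x3000, 0x4000, NULL, NULL };

	vma_link_list(&mm, &a, NULL);   /* list: a */
	vma_link_list(&mm, &b, &a);     /* list: a -> b */

	assert(mm.mmap == &a && a.vm_next == &b && b.vm_prev == &a);
	return 0;
}
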
debug_vm_pgtable.c
    90: struct vm_area_struct *vma, pte_t *ptep,   in pte_advanced_tests()
    187: struct vm_area_struct *vma, pmd_t *pmdp,   in pmd_advanced_tests()
    335: struct vm_area_struct *vma, pud_t *pudp,   in pud_advanced_tests()
    427: struct vm_area_struct *vma, pud_t *pudp,   in pud_advanced_tests()
    441: struct vm_area_struct *vma, pmd_t *pmdp,   in pmd_advanced_tests()
    447: struct vm_area_struct *vma, pud_t *pudp,   in pud_advanced_tests()
    975: struct vm_area_struct *vma;   in debug_vm_pgtable()

/mm/damon/

vaddr-test.h
    17: static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)   in __link_vmas()
    77: struct vm_area_struct vmas[] = {   in damon_test_three_regions_in_vmas()
    78: (struct vm_area_struct) {.vm_start = 10, .vm_end = 20},   in damon_test_three_regions_in_vmas()
    79: (struct vm_area_struct) {.vm_start = 20, .vm_end = 25},   in damon_test_three_regions_in_vmas()
    80: (struct vm_area_struct) {.vm_start = 200, .vm_end = 210},   in damon_test_three_regions_in_vmas()
    81: (struct vm_area_struct) {.vm_start = 210, .vm_end = 220},   in damon_test_three_regions_in_vmas()
    82: (struct vm_area_struct) {.vm_start = 300, .vm_end = 305},   in damon_test_three_regions_in_vmas()
    83: (struct vm_area_struct) {.vm_start = 307, .vm_end = 330},   in damon_test_three_regions_in_vmas()

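The fixture above exercises DAMON's three-regions heuristic: drop the two largest unmapped gaps between adjacent VMAs and treat the remaining mapped spans as three monitoring regions. The real logic lives in mm/damon/vaddr.c; the stand-alone sketch below only reproduces the gap selection on exactly these fixture values, with hypothetical helper names.

#include <stdio.h>

struct range { unsigned long start, end; };

static unsigned long gap_after(const struct range *v, int i)
{
	return v[i + 1].start - v[i].end;   /* unmapped space between VMA i and i+1 */
}

int main(void)
{
	/* The six VMA spans from the test fixture above (assumes at least three VMAs). */
	struct range vmas[] = {
		{10, 20}, {20, 25}, {200, 210}, {210, 220}, {300, 305}, {307, 330},
	};
	int n = sizeof(vmas) / sizeof(vmas[0]);

	/* Pick the indices of the two largest gaps between adjacent VMAs. */
	int g1 = 0, g2 = -1;
	for (int i = 1; i < n - 1; i++) {
		if (gap_after(vmas, i) > gap_after(vmas, g1)) {
			g2 = g1;
			g1 = i;
		} else if (g2 < 0 || gap_after(vmas, i) > gap_after(vmas, g2)) {
			g2 = i;
		}
	}

	/* The two big gaps split the mapped address space into three regions. */
	int lo = g1 < g2 ? g1 : g2, hi = g1 < g2 ? g2 : g1;
	printf("region 1: [%lu, %lu]\n", vmas[0].start, vmas[lo].end);
	printf("region 2: [%lu, %lu]\n", vmas[lo + 1].start, vmas[hi].end);
	printf("region 3: [%lu, %lu]\n", vmas[hi + 1].start, vmas[n - 1].end);
	return 0;
}

For these values the two dominant gaps are 25..200 and 220..300, so the sketch prints the regions [10, 25], [200, 220], and [300, 330].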