/kernel/linux/linux-5.10/mm/
gup.c
  1030  struct vm_area_struct **vmas, int *locked)  in __get_user_pages() argument
  1074  i = follow_hugetlb_page(mm, vma, pages, vmas,  in __get_user_pages()
  1135  if (vmas) {  in __get_user_pages()
  1136  vmas[i] = vma;  in __get_user_pages()
  1257  struct vm_area_struct **vmas,  in __get_user_pages_locked() argument
  1266  BUG_ON(vmas);  in __get_user_pages_locked()
  1290  vmas, locked);  in __get_user_pages_locked()
  1502  struct vm_area_struct **vmas, int *locked,  in __get_user_pages_locked() argument
  1532  if (vmas)  in __get_user_pages_locked()
  1533  vmas[i] = vma;  in __get_user_pages_locked()
  [all …]
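The gup.c hits above are where the caller-supplied vmas array gets filled: __get_user_pages() stores the vma backing each pinned page (vmas[i] = vma, lines 1136/1533). A minimal caller-side sketch of consuming that output array, assuming a 5.10 tree; demo_gup_with_vma is an invented helper, not code from the tree, and the returned vma pointer is only valid while mmap_lock is held:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Hedged sketch: pin one user page through the 5.10-era
     * get_user_pages(), which still accepts an optional vmas
     * output array alongside pages. */
    static int demo_gup_with_vma(unsigned long uaddr)
    {
        struct page *page;
        struct vm_area_struct *vma;
        long ret;

        mmap_read_lock(current->mm);
        ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE,
                             &page, &vma);
        if (ret == 1)
            pr_info("page backed by vma [%#lx-%#lx)\n",
                    vma->vm_start, vma->vm_end);
        mmap_read_unlock(current->mm);

        if (ret != 1)
            return ret < 0 ? ret : -EFAULT;
        put_page(page);
        return 0;
    }

The two __get_user_pages_locked() definitions in the hits (1257 and 1502) appear to be the CONFIG_MMU and !CONFIG_MMU variants of the same helper, both living in gup.c in 5.10.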
vmacache.c
  38   current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;  in vmacache_update()
  72   struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find()
  105  struct vm_area_struct *vma = current->vmacache.vmas[idx];  in vmacache_find_exact()
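Those three lines are essentially the entire read/write path of the per-task VMA cache: a direct-mapped array of VMACACHE_SIZE pointers indexed by VMACACHE_HASH(addr). A self-contained model of the scheme (constants mirror mm_types_task.h further down; 12 stands in for PAGE_SHIFT, and struct vma is a stand-in for vm_area_struct):

    #include <stddef.h>

    #define VMACACHE_BITS  2
    #define VMACACHE_SIZE  (1U << VMACACHE_BITS)   /* 4 slots in 5.10 */
    #define VMACACHE_MASK  (VMACACHE_SIZE - 1)
    #define VMACACHE_HASH(addr)  (((addr) >> 12) & VMACACHE_MASK)

    struct vma {
        unsigned long vm_start, vm_end;    /* maps [vm_start, vm_end) */
    };

    struct vmacache {
        struct vma *vmas[VMACACHE_SIZE];
    };

    /* update: unconditionally overwrite the slot the address hashes to */
    static void vmacache_update(struct vmacache *c, unsigned long addr,
                                struct vma *newvma)
    {
        c->vmas[VMACACHE_HASH(addr)] = newvma;
    }

    /* find: the cache is tiny, so probing every slot beats anything
     * smarter */
    static struct vma *vmacache_find(struct vmacache *c, unsigned long addr)
    {
        int idx;

        for (idx = 0; idx < VMACACHE_SIZE; idx++) {
            struct vma *vma = c->vmas[idx];

            if (vma && vma->vm_start <= addr && addr < vma->vm_end)
                return vma;
        }
        return NULL;
    }

Eviction is a plain overwrite: the cache trades hit rate for a single-store update, which is why vmacache_update() is one line in the real tree too.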
hugetlb.c
  4852  struct page **pages, struct vm_area_struct **vmas,  in follow_hugetlb_page() argument
  4970  if (!pages && !vmas && !pfn_offset &&  in follow_hugetlb_page()
  5001  if (vmas)  in follow_hugetlb_page()
  5002  vmas[i] = vma;  in follow_hugetlb_page()
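follow_hugetlb_page() is the hugetlb leg of the same GUP walk: gup.c line 1074 above hands pages and vmas straight to it. A condensed model of the per-slot bookkeeping these hits show; record_hugetlb_slot is an invented name, the sketch only compiles inside mm/ (mem_map_offset() is a mm/internal.h helper), and faulting, locking and refcounting are omitted:

    /* For each base-page slot the caller asked about, record the right
     * subpage of the huge page and, if requested, the owning vma.  The
     * line-4970 test above is the fast path: with no pages, no vmas and
     * no offset to honour, the loop can skip this and merely count. */
    static void record_hugetlb_slot(struct page *hpage,
                                    unsigned long pfn_offset,
                                    struct vm_area_struct *vma,
                                    struct page **pages,
                                    struct vm_area_struct **vmas, int i)
    {
        if (pages)
            pages[i] = mem_map_offset(hpage, pfn_offset);
        if (vmas)
            vmas[i] = vma;
    }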
nommu.c
  637  if (curr->vmacache.vmas[i] == vma) {  in delete_vma_from_mm()
/kernel/linux/linux-5.10/include/linux/
vmacache.h
  10  memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));  in vmacache_flush()
mm_types_task.h
  36  struct vm_area_struct *vmas[VMACACHE_SIZE];  member
mm.h
  1765  struct vm_area_struct **vmas, int *locked);
  1769  struct vm_area_struct **vmas, int *locked);
  1772  struct vm_area_struct **vmas);
  1775  struct vm_area_struct **vmas);
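The four truncated hits are the tails of the 5.10 GUP entry points. Reconstructed from the same tree for orientation (parameter names may differ slightly; include/linux/mm.h is authoritative):

    long get_user_pages_remote(struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
                               unsigned int gup_flags, struct page **pages,
                               struct vm_area_struct **vmas, int *locked);
    long pin_user_pages_remote(struct mm_struct *mm,
                               unsigned long start, unsigned long nr_pages,
                               unsigned int gup_flags, struct page **pages,
                               struct vm_area_struct **vmas, int *locked);
    long get_user_pages(unsigned long start, unsigned long nr_pages,
                        unsigned int gup_flags, struct page **pages,
                        struct vm_area_struct **vmas);
    long pin_user_pages(unsigned long start, unsigned long nr_pages,
                        unsigned int gup_flags, struct page **pages,
                        struct vm_area_struct **vmas);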
hugetlb.h
  223  struct vm_area_struct **vmas, unsigned long *position,  in follow_hugetlb_page() argument
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_gem.c
  332   list_add_tail(&vma->list, &msm_obj->vmas);  in add_vma()
  345   list_for_each_entry(vma, &msm_obj->vmas, list) {  in lookup_vma()
  371   list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {  in put_iova()
  840   if (!list_empty(&msm_obj->vmas)) {  in msm_gem_describe()
  844   list_for_each_entry(vma, &msm_obj->vmas, list) {  in msm_gem_describe()
  1030  INIT_LIST_HEAD(&msm_obj->vmas);  in msm_gem_new_impl()
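Here vmas is a different animal: not an output array but a list_head on each GEM object, holding one msm_gem_vma per address space the object is mapped into. Condensed from the 5.10 msm_gem.c around hit 345 (the caller must hold the object lock; WARN_ON checks trimmed):

    #include <linux/list.h>
    #include "msm_gem.h"

    /* Linear walk of the object's mappings: acceptable because an
     * object is mapped into at most a handful of address spaces. */
    static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
                                          struct msm_gem_address_space *aspace)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;

        list_for_each_entry(vma, &msm_obj->vmas, list) {
            if (vma->aspace == aspace)
                return vma;
        }
        return NULL;
    }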
msm_gem.h
  80  struct list_head vmas;  /* list of msm_gem_vma */  member
/kernel/linux/linux-5.10/drivers/video/fbdev/vermilion/
vermilion.h
  210  atomic_t vmas;  member
/kernel/linux/linux-5.10/kernel/debug/
debug_core.c
  294  if (!current->vmacache.vmas[i])  in kgdb_flush_swbreak_addr()
  296  flush_cache_range(current->vmacache.vmas[i],  in kgdb_flush_swbreak_addr()
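kgdb cannot safely walk the whole mm from the debugger trap, so it settles for flushing whatever the current task's vmacache still remembers. Reconstructed context around the two hits, abridged from 5.10 (CACHE_FLUSH_IS_SAFE and BREAK_INSTR_SIZE come from asm/kgdb.h):

    #include <linux/sched.h>
    #include <linux/vmacache.h>
    #include <asm/cacheflush.h>
    #include <asm/kgdb.h>

    /* After patching a software breakpoint, flush caches for every vma
     * the vmacache holds, then the icache range itself in case the
     * address lay outside this mm. */
    static void kgdb_flush_swbreak_addr(unsigned long addr)
    {
        if (!CACHE_FLUSH_IS_SAFE)
            return;

        if (current->mm) {
            int i;

            for (i = 0; i < VMACACHE_SIZE; i++) {
                if (!current->vmacache.vmas[i])
                    continue;
                flush_cache_range(current->vmacache.vmas[i],
                                  addr, addr + BREAK_INSTR_SIZE);
            }
        }

        flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
    }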
/kernel/linux/linux-5.10/Documentation/locking/
robust-futexes.rst
  58  FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
  69  microsecond on Linux, but with thousands (or tens of thousands) of vmas
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
userfaultfd.rst
  35  operations never involve heavyweight structures like vmas (in fact the
  40  Terabytes. Too many vmas would be needed for that.
/kernel/linux/linux-5.10/fs/
io_uring.c
  8242  struct vm_area_struct **vmas = NULL;  in io_sqe_buffer_register() local
  8289  kvfree(vmas);  in io_sqe_buffer_register()
  8293  vmas = kvmalloc_array(nr_pages,  in io_sqe_buffer_register()
  8296  if (!pages || !vmas) {  in io_sqe_buffer_register()
  8313  pages, vmas);  in io_sqe_buffer_register()
  8317  struct vm_area_struct *vma = vmas[j];  in io_sqe_buffer_register()
  8367  kvfree(vmas);  in io_sqe_buffer_register()
  8371  kvfree(vmas);  in io_sqe_buffer_register()
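io_uring is a textbook consumer of the GUP vmas side-channel: io_sqe_buffer_register() allocates a parallel vmas array purely so it can refuse file-backed (non-hugetlb) memory after pinning. A trimmed sketch of that pattern under 5.10; demo_register_buf is an invented wrapper, and the real function keeps the pin for the lifetime of the registered buffer:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/sched.h>
    #include <linux/hugetlb.h>

    static int demo_register_buf(unsigned long ubuf, size_t len)
    {
        unsigned long start = ubuf >> PAGE_SHIFT;
        unsigned long end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int i, nr_pages = end - start, ret = 0;
        struct vm_area_struct **vmas;
        struct page **pages;
        long pret;

        pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
                              GFP_KERNEL);
        if (!pages || !vmas) {
            ret = -ENOMEM;
            goto out;
        }

        mmap_read_lock(current->mm);
        pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
                              pages, vmas);
        if (pret == nr_pages) {
            /* the whole point of vmas[] here: detect file-backed
             * mappings and allow only hugetlbfs ones */
            for (i = 0; i < nr_pages; i++) {
                struct vm_area_struct *vma = vmas[i];

                if (vma->vm_file &&
                    !is_file_hugepages(vma->vm_file)) {
                    ret = -EOPNOTSUPP;
                    break;
                }
            }
        } else {
            ret = pret < 0 ? pret : -EFAULT;
        }
        mmap_read_unlock(current->mm);

        /* demo only: drop the pin immediately */
        if (pret > 0)
            unpin_user_pages(pages, pret);
    out:
        kvfree(pages);
        kvfree(vmas);
        return ret;
    }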
/kernel/linux/patches/linux-5.10/imx8mm_patch/patches/drivers/
0038_linux_drivers_mxc.patch
  140373  + /* No such memory, or across vmas. */