Home
last modified time | relevance | path

Searched refs:vmas (Results 1 – 16 of 16) sorted by relevance

/kernel/linux/linux-5.10/mm/
gup.c:1030 struct vm_area_struct **vmas, int *locked) in __get_user_pages() argument
1074 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1135 if (vmas) { in __get_user_pages()
1136 vmas[i] = vma; in __get_user_pages()
1257 struct vm_area_struct **vmas, in __get_user_pages_locked() argument
1266 BUG_ON(vmas); in __get_user_pages_locked()
1290 vmas, locked); in __get_user_pages_locked()
1502 struct vm_area_struct **vmas, int *locked, in __get_user_pages_locked() argument
1532 if (vmas) in __get_user_pages_locked()
1533 vmas[i] = vma; in __get_user_pages_locked()
[all …]
vmacache.c:38 current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; in vmacache_update()
72 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find()
105 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find_exact()
hugetlb.c:4852 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page() argument
4970 if (!pages && !vmas && !pfn_offset && in follow_hugetlb_page()
5001 if (vmas) in follow_hugetlb_page()
5002 vmas[i] = vma; in follow_hugetlb_page()
nommu.c:637 if (curr->vmacache.vmas[i] == vma) { in delete_vma_from_mm()
/kernel/linux/linux-5.10/include/linux/
vmacache.h:10 memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); in vmacache_flush()
mm_types_task.h:36 struct vm_area_struct *vmas[VMACACHE_SIZE]; member
mm.h:1765 struct vm_area_struct **vmas, int *locked);
1769 struct vm_area_struct **vmas, int *locked);
1772 struct vm_area_struct **vmas);
1775 struct vm_area_struct **vmas);
hugetlb.h:223 struct vm_area_struct **vmas, unsigned long *position, in follow_hugetlb_page() argument
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_gem.c:332 list_add_tail(&vma->list, &msm_obj->vmas); in add_vma()
345 list_for_each_entry(vma, &msm_obj->vmas, list) { in lookup_vma()
371 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { in put_iova()
840 if (!list_empty(&msm_obj->vmas)) { in msm_gem_describe()
844 list_for_each_entry(vma, &msm_obj->vmas, list) { in msm_gem_describe()
1030 INIT_LIST_HEAD(&msm_obj->vmas); in msm_gem_new_impl()
msm_gem.h:80 struct list_head vmas; /* list of msm_gem_vma */ member
/kernel/linux/linux-5.10/drivers/video/fbdev/vermilion/
vermilion.h:210 atomic_t vmas; member
/kernel/linux/linux-5.10/kernel/debug/
debug_core.c:294 if (!current->vmacache.vmas[i]) in kgdb_flush_swbreak_addr()
296 flush_cache_range(current->vmacache.vmas[i], in kgdb_flush_swbreak_addr()
/kernel/linux/linux-5.10/Documentation/locking/
robust-futexes.rst:58 FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
69 microsecond on Linux, but with thousands (or tens of thousands) of vmas
/kernel/linux/linux-5.10/Documentation/admin-guide/mm/
userfaultfd.rst:35 operations never involve heavyweight structures like vmas (in fact the
40 Terabytes. Too many vmas would be needed for that.
/kernel/linux/linux-5.10/fs/
io_uring.c:8242 struct vm_area_struct **vmas = NULL; in io_sqe_buffer_register() local
8289 kvfree(vmas); in io_sqe_buffer_register()
8293 vmas = kvmalloc_array(nr_pages, in io_sqe_buffer_register()
8296 if (!pages || !vmas) { in io_sqe_buffer_register()
8313 pages, vmas); in io_sqe_buffer_register()
8317 struct vm_area_struct *vma = vmas[j]; in io_sqe_buffer_register()
8367 kvfree(vmas); in io_sqe_buffer_register()
8371 kvfree(vmas); in io_sqe_buffer_register()
/kernel/linux/patches/linux-5.10/imx8mm_patch/patches/drivers/
0038_linux_drivers_mxc.patch:140373 + /* No such memory, or across vmas. */