Lines matching refs:vma (identifier cross-references for vma in the kernel's mm/gup.c)

23 static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table()  argument
34 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
39 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
54 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
55 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
73 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
76 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
84 return no_page_table(vma, flags); in follow_page_pte()
113 page = vm_normal_page(vma, address, pte); in follow_page_pte()
136 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
178 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
211 return no_page_table(vma, flags); in follow_page_pte()
214 static struct page *follow_pmd_mask(struct vm_area_struct *vma, in follow_pmd_mask() argument
221 struct mm_struct *mm = vma->vm_mm; in follow_pmd_mask()
225 return no_page_table(vma, flags); in follow_pmd_mask()
226 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { in follow_pmd_mask()
230 return no_page_table(vma, flags); in follow_pmd_mask()
233 page = follow_huge_pd(vma, address, in follow_pmd_mask()
238 return no_page_table(vma, flags); in follow_pmd_mask()
243 return no_page_table(vma, flags); in follow_pmd_mask()
252 page = follow_devmap_pmd(vma, address, pmd, flags); in follow_pmd_mask()
258 return follow_page_pte(vma, address, pmd, flags); in follow_pmd_mask()
261 return no_page_table(vma, flags); in follow_pmd_mask()
268 return no_page_table(vma, flags); in follow_pmd_mask()
274 return follow_page_pte(vma, address, pmd, flags); in follow_pmd_mask()
282 split_huge_pmd(vma, pmd, address); in follow_pmd_mask()
296 return no_page_table(vma, flags); in follow_pmd_mask()
300 follow_page_pte(vma, address, pmd, flags); in follow_pmd_mask()
302 page = follow_trans_huge_pmd(vma, address, pmd, flags); in follow_pmd_mask()
309 static struct page *follow_pud_mask(struct vm_area_struct *vma, in follow_pud_mask() argument
316 struct mm_struct *mm = vma->vm_mm; in follow_pud_mask()
320 return no_page_table(vma, flags); in follow_pud_mask()
321 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_pud_mask()
325 return no_page_table(vma, flags); in follow_pud_mask()
328 page = follow_huge_pd(vma, address, in follow_pud_mask()
333 return no_page_table(vma, flags); in follow_pud_mask()
337 page = follow_devmap_pud(vma, address, pud, flags); in follow_pud_mask()
343 return no_page_table(vma, flags); in follow_pud_mask()
345 return follow_pmd_mask(vma, address, pud, flags, page_mask); in follow_pud_mask()
349 static struct page *follow_p4d_mask(struct vm_area_struct *vma, in follow_p4d_mask() argument
358 return no_page_table(vma, flags); in follow_p4d_mask()
361 return no_page_table(vma, flags); in follow_p4d_mask()
364 page = follow_huge_pd(vma, address, in follow_p4d_mask()
369 return no_page_table(vma, flags); in follow_p4d_mask()
371 return follow_pud_mask(vma, address, p4d, flags, page_mask); in follow_p4d_mask()
387 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
393 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
407 return no_page_table(vma, flags); in follow_page_mask()
413 return no_page_table(vma, flags); in follow_page_mask()
416 page = follow_huge_pd(vma, address, in follow_page_mask()
421 return no_page_table(vma, flags); in follow_page_mask()
424 return follow_p4d_mask(vma, address, pgd, flags, page_mask); in follow_page_mask()
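
The follow_*_mask() chain above descends the page tables one level per call: follow_page_mask() starts at the pgd, hands off to follow_p4d_mask(), then follow_pud_mask(), then follow_pmd_mask(), and follow_page_pte() finally resolves the leaf entry, with each missing level bailing out through no_page_table(). The standalone program below is only a toy model of that early-return descent, using invented toy_* names and a four-level table; none of it is kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Toy model only: a four-level radix table mapping a small "address"
 * to a value, mimicking the pgd -> pud -> pmd -> pte descent above.
 * Every name here (toy_*) is invented for illustration. */
#define TOY_BITS_PER_LEVEL 4
#define TOY_FANOUT (1u << TOY_BITS_PER_LEVEL)

struct toy_node {
	void *slot[TOY_FANOUT];	/* next-level table, or the value at the leaf */
};

static unsigned int toy_index(uint32_t addr, int level)
{
	/* level 3 is the top table (like the pgd), level 0 the leaf (like the pte) */
	return (addr >> (level * TOY_BITS_PER_LEVEL)) & (TOY_FANOUT - 1);
}

/* Descend one level at a time, the way follow_p4d/pud/pmd_mask() do,
 * and give up as soon as an entry is empty (analogous to no_page_table()). */
static void *toy_follow(struct toy_node *top, uint32_t addr)
{
	struct toy_node *node = top;
	int level;

	for (level = 3; level > 0; level--) {
		void *next = node->slot[toy_index(addr, level)];

		if (!next)
			return NULL;
		node = next;
	}
	return node->slot[toy_index(addr, 0)];	/* leaf lookup, like follow_page_pte() */
}

static void toy_insert(struct toy_node *top, uint32_t addr, void *value)
{
	struct toy_node *node = top;
	int level;

	for (level = 3; level > 0; level--) {
		void **slot = &node->slot[toy_index(addr, level)];

		if (!*slot) {
			*slot = calloc(1, sizeof(struct toy_node));
			if (!*slot)
				return;
		}
		node = *slot;
	}
	node->slot[toy_index(addr, 0)] = value;
}

int main(void)
{
	struct toy_node top = { { NULL } };
	static char page[] = "mapped";

	toy_insert(&top, 0xabcd, page);
	printf("0xabcd -> %s\n", (char *)toy_follow(&top, 0xabcd));
	printf("0x1234 -> %p\n", toy_follow(&top, 0x1234));
	return 0;
}

The real walk differs in the obvious ways (per-level locking, the huge-page and devmap short-circuits, the FOLL_* flag handling visible above), but the control flow is the same early-return descent.
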
428 unsigned int gup_flags, struct vm_area_struct **vma, in get_gate_page() argument
460 *vma = get_gate_vma(mm); in get_gate_page()
463 *page = vm_normal_page(*vma, address, *pte); in get_gate_page()
492 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, in faultin_page() argument
514 ret = handle_mm_fault(vma, address, fault_flags); in faultin_page()
545 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
550 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) in check_vma_flags() argument
552 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
559 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) in check_vma_flags()
592 if (!arch_vma_access_permitted(vma, write, false, foreign)) in check_vma_flags()
660 struct vm_area_struct *vma = NULL; in __get_user_pages() local
681 if (!vma || start >= vma->vm_end) { in __get_user_pages()
682 vma = find_extend_vma(mm, start); in __get_user_pages()
683 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
686 gup_flags, &vma, in __get_user_pages()
694 if (!vma || check_vma_flags(vma, gup_flags)) in __get_user_pages()
696 if (is_vm_hugetlb_page(vma)) { in __get_user_pages()
697 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
711 page = follow_page_mask(vma, start, foll_flags, &page_mask); in __get_user_pages()
714 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
740 flush_anon_page(vma, page, start); in __get_user_pages()
746 vmas[i] = vma; in __get_user_pages()
759 static bool vma_permits_fault(struct vm_area_struct *vma, in vma_permits_fault() argument
766 if (!(vm_flags & vma->vm_flags)) in vma_permits_fault()
776 if (!arch_vma_access_permitted(vma, write, false, foreign)) in vma_permits_fault()
816 struct vm_area_struct *vma; in fixup_user_fault() local
823 vma = find_extend_vma(mm, address); in fixup_user_fault()
824 if (!vma || address < vma->vm_start) in fixup_user_fault()
827 if (!vma_permits_fault(vma, fault_flags)) in fixup_user_fault()
830 ret = handle_mm_fault(vma, address, fault_flags); in fixup_user_fault()
1147 struct vm_area_struct *vma = vmas[i]; in get_user_pages_longterm() local
1149 if (vma == vma_prev) in get_user_pages_longterm()
1152 vma_prev = vma; in get_user_pages_longterm()
1154 if (vma_is_fsdax(vma)) in get_user_pages_longterm()
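
__get_user_pages() above is the core loop behind the exported pinning interfaces such as get_user_pages() and get_user_pages_longterm(): it locates or extends the vma with find_extend_vma(), checks permissions with check_vma_flags(), resolves each address through follow_page_mask(), and falls back to faultin_page() when nothing is mapped yet; the get_user_pages_longterm() hits add the extra vma_is_fsdax() check that refuses to hold FS DAX pages indefinitely. The fragment below is a hedged sketch of how a kernel-side caller of this era (mmap_sem, before the pin_user_pages() rework) would typically pin and release a user buffer; pin_user_buffer() is an invented name and error handling is trimmed.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>

/*
 * Hypothetical helper, not part of gup.c: pin nr_pages user pages that
 * start at uaddr for writing, then drop the references again.  Locking
 * matches this era of the kernel (mmap_sem, not mmap_lock).
 */
static int pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
			   struct page **pages)
{
	long pinned, i;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (pinned < 0)
		return pinned;

	/* ... hand the pages to a device, copy out of them, etc. ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* drop the reference get_user_pages() took */

	return pinned == (long)nr_pages ? 0 : -EFAULT;
}
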
1196 long populate_vma_page_range(struct vm_area_struct *vma, in populate_vma_page_range() argument
1199 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
1205 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
1206 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
1210 if (vma->vm_flags & VM_LOCKONFAULT) in populate_vma_page_range()
1217 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) in populate_vma_page_range()
1224 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) in populate_vma_page_range()
1246 struct vm_area_struct *vma = NULL; in __mm_populate() local
1260 vma = find_vma(mm, nstart); in __mm_populate()
1261 } else if (nstart >= vma->vm_end) in __mm_populate()
1262 vma = vma->vm_next; in __mm_populate()
1263 if (!vma || vma->vm_start >= end) in __mm_populate()
1269 nend = min(end, vma->vm_end); in __mm_populate()
1270 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in __mm_populate()
1272 if (nstart < vma->vm_start) in __mm_populate()
1273 nstart = vma->vm_start; in __mm_populate()
1279 ret = populate_vma_page_range(vma, nstart, nend, &locked); in __mm_populate()
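
populate_vma_page_range() and __mm_populate() above are the pre-faulting path reachable from userspace: mlock()/mlockall() and mmap() with MAP_POPULATE both end up in this loop, which walks the vmas in the range and repeatedly calls __get_user_pages() so the pages are resident (and, for mlock, counted as VM_LOCKED) before the syscall returns. A minimal userspace demonstration, assuming Linux and glibc:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 << 20;	/* 4 MiB */

	/* MAP_POPULATE asks the kernel to pre-fault the whole range,
	 * which goes through mm_populate() -> __mm_populate(). */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* mlock() also populates (and locks) the range via the same path. */
	if (mlock(buf, len))
		perror("mlock");

	memset(buf, 0x5a, len);
	munlock(buf, len);
	munmap(buf, len);
	return 0;
}

If RLIMIT_MEMLOCK is too small the mlock() call fails; MAP_POPULATE has no such limit because it only pre-faults the range without locking it.
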
1312 struct vm_area_struct *vma; in get_dump_page() local
1316 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, in get_dump_page()
1319 flush_cache_page(vma, addr, page_to_pfn(page)); in get_dump_page()