
Lines Matching refs:vma

19 static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table()  argument
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
45 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
48 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
55 return no_page_table(vma, flags); in follow_page_pte()
84 page = vm_normal_page(vma, address, pte); in follow_page_pte()
105 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
137 return no_page_table(vma, flags); in follow_page_pte()
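follow_page_pte() is the PTE-level half of the lookup: it takes the page-table lock, translates a present PTE into its struct page with vm_normal_page() (line 84), and, when FOLL_MLOCK is set on a VM_LOCKED vma (line 105), nudges the page toward the unevictable list. The following is a condensed paraphrase of that flow for orientation only, assuming the same kernel context as the listing; the real function also handles swap and migration entries, write-permission checks and error paths that are trimmed here.

static struct page *follow_page_pte_sketch(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags)
{
        struct mm_struct *mm = vma->vm_mm;
        struct page *page;
        spinlock_t *ptl;
        pte_t *ptep, pte;

        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        pte = *ptep;
        if (!pte_present(pte)) {
                /* Not mapped (or swapped/migrating): let the caller fault it in. */
                pte_unmap_unlock(ptep, ptl);
                return no_page_table(vma, flags);
        }

        page = vm_normal_page(vma, address, pte);       /* NULL for special pfn mappings */
        if (page && (flags & FOLL_GET))
                get_page(page);
        if (page && (flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /* Move pages of mlocked vmas onto the unevictable list. */
                if (page->mapping && trylock_page(page)) {
                        lru_add_drain();
                        mlock_vma_page(page);
                        unlock_page(page);
                }
        }

        pte_unmap_unlock(ptep, ptl);
        return page;
}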
153 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
162 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
174 return no_page_table(vma, flags); in follow_page_mask()
178 return no_page_table(vma, flags); in follow_page_mask()
179 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
183 return no_page_table(vma, flags); in follow_page_mask()
186 return no_page_table(vma, flags); in follow_page_mask()
190 return no_page_table(vma, flags); in follow_page_mask()
191 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
195 return no_page_table(vma, flags); in follow_page_mask()
198 return no_page_table(vma, flags); in follow_page_mask()
201 split_huge_page_pmd(vma, address, pmd); in follow_page_mask()
202 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
208 wait_split_huge_page(vma->anon_vma, pmd); in follow_page_mask()
210 page = follow_trans_huge_pmd(vma, address, in follow_page_mask()
219 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
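follow_page_mask() is the walk that reaches that PTE: pgd to pud to pmd, returning no_page_table() whenever a level is absent, and branching to the hugetlb or transparent-huge-page handlers when vma->vm_flags or the pmd say the mapping is huge (lines 179, 191 and 201-210 above). A skeleton of the walk with the huge-page branches reduced to comments; this is an illustration in the same kernel context, not the exact upstream code:

static struct page *follow_page_mask_sketch(struct vm_area_struct *vma,
                unsigned long address, unsigned int flags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return no_page_table(vma, flags);
        /* pud_huge() && VM_HUGETLB would divert to the hugetlb lookup here. */

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return no_page_table(vma, flags);
        /* pmd_huge()/pmd_trans_huge() divert to the hugetlb or THP paths; with
         * FOLL_SPLIT the huge pmd is split via split_huge_page_pmd() and the
         * walk falls back to the ordinary PTE lookup below. */

        return follow_page_pte(vma, address, pmd, flags);
}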
223 unsigned int gup_flags, struct vm_area_struct **vma, in get_gate_page() argument
249 *vma = get_gate_vma(mm); in get_gate_page()
252 *page = vm_normal_page(*vma, address, *pte); in get_gate_page()
271 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, in faultin_page() argument
274 struct mm_struct *mm = vma->vm_mm; in faultin_page()
289 ret = handle_mm_fault(mm, vma, address, fault_flags); in faultin_page()
322 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
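faultin_page() is the slow path taken when the walk above finds nothing: it translates the gup flags into FAULT_FLAG_* bits, calls handle_mm_fault() (line 289), and on return clears FOLL_WRITE when a write fault was satisfied by COW in a vma that lacks VM_WRITE (line 322), so the retried lookup accepts the resulting read-only PTE. A trimmed, hedged paraphrase, assuming the same kernel context; the real function also deals with FOLL_TRIED, OOM and retry bookkeeping:

static int faultin_page_sketch(struct task_struct *tsk, struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned int fault_flags = 0;
        int ret;

        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (*flags & FOLL_NOWAIT)
                fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;

        ret = handle_mm_fault(mm, vma, address, fault_flags);
        if (ret & VM_FAULT_ERROR)
                return -EFAULT;         /* the real code maps individual VM_FAULT_* bits */

        /*
         * A successful write fault on a vma without VM_WRITE means COW broke
         * the sharing (the FOLL_FORCE case); drop FOLL_WRITE so the re-walk
         * in follow_page_mask() is satisfied by the read-only PTE.
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
                *flags &= ~FOLL_WRITE;
        return 0;
}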
327 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) in check_vma_flags() argument
329 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
428 struct vm_area_struct *vma = NULL; in __get_user_pages() local
449 if (!vma || start >= vma->vm_end) { in __get_user_pages()
450 vma = find_extend_vma(mm, start); in __get_user_pages()
451 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
454 gup_flags, &vma, in __get_user_pages()
462 if (!vma || check_vma_flags(vma, gup_flags)) in __get_user_pages()
464 if (is_vm_hugetlb_page(vma)) { in __get_user_pages()
465 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
479 page = follow_page_mask(vma, start, foll_flags, &page_mask); in __get_user_pages()
482 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
502 flush_anon_page(vma, page, start); in __get_user_pages()
508 vmas[i] = vma; in __get_user_pages()
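__get_user_pages() is the loop driving all of the above: for each user address it locates (or extends) the vma, validates it with check_vma_flags(), hands whole hugetlb vmas to follow_hugetlb_page(), and otherwise alternates follow_page_mask() and faultin_page() until a page materialises, optionally recording the page and the vma for the caller (lines 449-508 above). In-kernel users normally go through the get_user_pages() wrapper. A minimal, illustrative caller using the get_user_pages() signature of this era (tsk, mm, start, nr_pages, write, force, pages, vmas); the function pin_and_peek and its surroundings are invented for the example:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>

static int pin_and_peek(unsigned long uaddr)
{
        struct page *page;
        void *kaddr;
        long got;

        down_read(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1,         /* nr_pages */
                             0,         /* write */
                             0,         /* force */
                             &page, NULL);
        up_read(&current->mm->mmap_sem);
        if (got != 1)
                return -EFAULT;

        kaddr = kmap(page);             /* temporary kernel mapping */
        /* ... inspect kaddr[0 .. PAGE_SIZE - 1] ... */
        kunmap(page);

        put_page(page);                 /* drop the reference FOLL_GET took */
        return 0;
}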
552 struct vm_area_struct *vma; in fixup_user_fault() local
556 vma = find_extend_vma(mm, address); in fixup_user_fault()
557 if (!vma || address < vma->vm_start) in fixup_user_fault()
561 if (!(vm_flags & vma->vm_flags)) in fixup_user_fault()
564 ret = handle_mm_fault(mm, vma, address, fault_flags); in fixup_user_fault()
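fixup_user_fault() is the stripped-down variant used when a caller only needs the fault resolved, not the page itself: it finds the vma (line 556), checks that the required vm_flags are present (line 561), and calls handle_mm_fault() directly (line 564). The futex code uses it to make a user address writable before retrying an atomic operation. A minimal, hypothetical caller, using the signature of this era (tsk, mm, address, fault_flags):

static int make_writable(unsigned long uaddr)
{
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = fixup_user_fault(current, current->mm, uaddr, FAULT_FLAG_WRITE);
        up_read(&current->mm->mmap_sem);
        return ret;     /* 0 on success, negative errno otherwise */
}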
668 struct vm_area_struct *vma; in get_dump_page() local
672 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, in get_dump_page()
675 flush_cache_page(vma, addr, page_to_pfn(page)); in get_dump_page()
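get_dump_page() wraps a single-page __get_user_pages() call with FOLL_FORCE | FOLL_DUMP | FOLL_GET (line 672). FOLL_DUMP makes no_page_table() (line 30 above) fail for ranges that would dump as all zeroes, so the core-dump writer can emit a hole instead of a page of zeroes. A hedged sketch of a dumper-style consumer, assuming kernel context; the real users live in the ELF core-dump code, and the emit callbacks here are invented for the illustration:

static int dump_range_sketch(unsigned long start, unsigned long end,
                             int (*emit)(struct page *page),
                             int (*emit_hole)(void))
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);

                if (page) {
                        int err = emit(page);   /* write the page's contents */
                        put_page(page);         /* release the FOLL_GET reference */
                        if (err)
                                return err;
                } else if (emit_hole()) {       /* unmapped or zero: skip forward */
                        return -EIO;
                }
        }
        return 0;
}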