Lines matching refs:vma — every reference to the vma identifier in the kernel's get_user_pages (GUP) code (mm/gup.c), shown with its line number in the file, the matching source text, and the enclosing function (each occurrence tagged as an argument or a local).

447 static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table()  argument
459 (vma_is_anonymous(vma) || !vma->vm_ops->fault)) in no_page_table()
464 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
479 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
480 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
498 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
502 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
517 if (is_vm_hugetlb_page(vma)) { in follow_page_pte()
518 page = follow_huge_pmd_pte(vma, address, flags); in follow_page_pte()
521 return no_page_table(vma, flags); in follow_page_pte()
526 return no_page_table(vma, flags); in follow_page_pte()
555 page = vm_normal_page(vma, address, pte); in follow_page_pte()
577 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
612 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
645 return no_page_table(vma, flags); in follow_page_pte()
648 static struct page *follow_pmd_mask(struct vm_area_struct *vma, in follow_pmd_mask() argument
656 struct mm_struct *mm = vma->vm_mm; in follow_pmd_mask()
665 return no_page_table(vma, flags); in follow_pmd_mask()
666 if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) { in follow_pmd_mask()
667 page = follow_huge_pmd_pte(vma, address, flags); in follow_pmd_mask()
670 return no_page_table(vma, flags); in follow_pmd_mask()
673 page = follow_huge_pd(vma, address, in follow_pmd_mask()
678 return no_page_table(vma, flags); in follow_pmd_mask()
683 return no_page_table(vma, flags); in follow_pmd_mask()
694 return no_page_table(vma, flags); in follow_pmd_mask()
699 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
705 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
708 return no_page_table(vma, flags); in follow_pmd_mask()
714 return no_page_table(vma, flags); in follow_pmd_mask()
719 return no_page_table(vma, flags); in follow_pmd_mask()
725 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
733 split_huge_pmd(vma, pmd, address); in follow_pmd_mask()
738 split_huge_pmd(vma, pmd, address); in follow_pmd_mask()
743 follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); in follow_pmd_mask()
745 page = follow_trans_huge_pmd(vma, address, pmd, flags); in follow_pmd_mask()
751 static struct page *follow_pud_mask(struct vm_area_struct *vma, in follow_pud_mask() argument
759 struct mm_struct *mm = vma->vm_mm; in follow_pud_mask()
763 return no_page_table(vma, flags); in follow_pud_mask()
764 if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) { in follow_pud_mask()
768 return no_page_table(vma, flags); in follow_pud_mask()
771 page = follow_huge_pd(vma, address, in follow_pud_mask()
776 return no_page_table(vma, flags); in follow_pud_mask()
780 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); in follow_pud_mask()
786 return no_page_table(vma, flags); in follow_pud_mask()
788 return follow_pmd_mask(vma, address, pud, flags, ctx); in follow_pud_mask()
791 static struct page *follow_p4d_mask(struct vm_area_struct *vma, in follow_p4d_mask() argument
801 return no_page_table(vma, flags); in follow_p4d_mask()
804 return no_page_table(vma, flags); in follow_p4d_mask()
807 page = follow_huge_pd(vma, address, in follow_p4d_mask()
812 return no_page_table(vma, flags); in follow_p4d_mask()
814 return follow_pud_mask(vma, address, p4d, flags, ctx); in follow_p4d_mask()
836 static struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
842 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
856 return no_page_table(vma, flags); in follow_page_mask()
862 return no_page_table(vma, flags); in follow_page_mask()
865 page = follow_huge_pd(vma, address, in follow_page_mask()
870 return no_page_table(vma, flags); in follow_page_mask()
873 return follow_p4d_mask(vma, address, pgd, flags, ctx); in follow_page_mask()
876 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, in follow_page() argument
882 if (vma_is_secretmem(vma)) in follow_page()
885 page = follow_page_mask(vma, address, foll_flags, &ctx); in follow_page()
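
The entries above trace a single descent of the page-table hierarchy: follow_page() calls follow_page_mask(), which walks pgd -> p4d -> pud -> pmd -> pte through follow_p4d_mask(), follow_pud_mask(), follow_pmd_mask() and finally follow_page_pte(), bailing out through no_page_table() whenever a level is absent or the FOLL_* flags forbid the access. A minimal, hypothetical in-kernel caller of follow_page() might look like the sketch below; the helper name and error handling are illustrative only and are not taken from mm/gup.c (most code should use get_user_pages()/pin_user_pages() instead).

    /* Hypothetical sketch: translate one user address in 'mm' to its page.
     * Assumes built-in kernel context where follow_page() is visible. */
    #include <linux/mm.h>
    #include <linux/mm_types.h>

    static struct page *lookup_user_page(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            struct page *page = NULL;

            mmap_read_lock(mm);
            vma = find_vma(mm, addr);
            if (vma && addr >= vma->vm_start)
                    /* FOLL_GET takes a reference; drop it with put_page() */
                    page = follow_page(vma, addr, FOLL_GET);
            mmap_read_unlock(mm);

            return page;    /* NULL if unmapped, or an ERR_PTR() on error */
    }
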
892 unsigned int gup_flags, struct vm_area_struct **vma, in get_gate_page() argument
924 *vma = get_gate_vma(mm); in get_gate_page()
927 *page = vm_normal_page(*vma, address, *pte); in get_gate_page()
949 static int faultin_page(struct vm_area_struct *vma, in faultin_page() argument
976 ret = handle_mm_fault(vma, address, fault_flags, NULL); in faultin_page()
1000 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
1005 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) in check_vma_flags() argument
1007 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
1014 if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) in check_vma_flags()
1017 if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) in check_vma_flags()
1020 if (vma_is_secretmem(vma)) in check_vma_flags()
1053 if (!arch_vma_access_permitted(vma, write, false, foreign)) in check_vma_flags()
1124 struct vm_area_struct *vma = NULL; in __get_user_pages() local
1148 if (!vma || start >= vma->vm_end) { in __get_user_pages()
1149 vma = find_extend_vma(mm, start); in __get_user_pages()
1150 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
1152 gup_flags, &vma, in __get_user_pages()
1160 if (!vma) { in __get_user_pages()
1164 ret = check_vma_flags(vma, gup_flags); in __get_user_pages()
1168 if (is_vm_hugetlb_page(vma)) { in __get_user_pages()
1169 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1195 page = follow_page_mask(vma, start, foll_flags, &ctx); in __get_user_pages()
1197 ret = faultin_page(vma, start, &foll_flags, locked); in __get_user_pages()
1224 flush_anon_page(vma, page, start); in __get_user_pages()
1230 vmas[i] = vma; in __get_user_pages()
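
__get_user_pages() is the slow-path core behind the get_user_pages()/pin_user_pages() family. For each address in the range it looks up (and may extend) the VMA with find_extend_vma(), validates the access with check_vma_flags(), tries follow_page_mask(), and falls back to faultin_page() to fault the page in before retrying. Driver code normally enters through one of the public wrappers; the fragment below is a hedged illustration using pin_user_pages_fast() and unpin_user_pages(), with the function name and buffer handling made up for the example.

    /* Hypothetical sketch: pin the pages backing a user buffer, e.g. for a
     * device that will write into it, then release the pins. */
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    static int pin_user_buffer(unsigned long uaddr, size_t len)
    {
            int nr_pages = DIV_ROUND_UP(len + offset_in_page(uaddr), PAGE_SIZE);
            struct page **pages;
            int pinned;

            pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* FOLL_WRITE: the pages must be writable (CoW broken if needed) */
            pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0) {
                    kfree(pages);
                    return pinned;
            }
            if (pinned != nr_pages) {
                    /* partial pin: undo and report failure */
                    unpin_user_pages(pages, pinned);
                    kfree(pages);
                    return -EFAULT;
            }

            /* ... access the pinned pages ... */

            unpin_user_pages(pages, nr_pages);
            kfree(pages);
            return 0;
    }
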
1246 static bool vma_permits_fault(struct vm_area_struct *vma, in vma_permits_fault() argument
1253 if (!(vm_flags & vma->vm_flags)) in vma_permits_fault()
1263 if (!arch_vma_access_permitted(vma, write, false, foreign)) in vma_permits_fault()
1302 struct vm_area_struct *vma; in fixup_user_fault() local
1311 vma = find_extend_vma(mm, address); in fixup_user_fault()
1312 if (!vma || address < vma->vm_start) in fixup_user_fault()
1315 if (!vma_permits_fault(vma, fault_flags)) in fixup_user_fault()
1322 ret = handle_mm_fault(vma, address, fault_flags, NULL); in fixup_user_fault()
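
fixup_user_fault() is the single-address variant for callers that only need the fault resolved rather than a page reference: it finds the VMA, checks the access with vma_permits_fault(), and calls handle_mm_fault(), re-taking mmap_lock and retrying if the fault handler had to drop it. A hedged example of the usual calling pattern is sketched below; the wrapper function is hypothetical.

    /* Hypothetical sketch: fault in one user address for writing. The caller
     * must hold mmap_lock; 'unlocked' reports whether it was dropped and
     * re-taken along the way. */
    #include <linux/mm.h>

    static int make_user_addr_writable(struct mm_struct *mm, unsigned long addr)
    {
            bool unlocked = false;
            int ret;

            mmap_read_lock(mm);
            ret = fixup_user_fault(mm, addr, FAULT_FLAG_WRITE, &unlocked);
            mmap_read_unlock(mm);

            return ret;     /* 0 on success, a negative errno on failure */
    }
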
1494 long populate_vma_page_range(struct vm_area_struct *vma, in populate_vma_page_range() argument
1497 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
1503 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
1504 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
1508 if (vma->vm_flags & VM_LOCKONFAULT) in populate_vma_page_range()
1515 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) in populate_vma_page_range()
1522 if (vma_is_accessible(vma)) in populate_vma_page_range()
1556 long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, in faultin_vma_page_range() argument
1559 struct mm_struct *mm = vma->vm_mm; in faultin_vma_page_range()
1565 VM_BUG_ON_VMA(start < vma->vm_start, vma); in faultin_vma_page_range()
1566 VM_BUG_ON_VMA(end > vma->vm_end, vma); in faultin_vma_page_range()
1587 if (check_vma_flags(vma, gup_flags)) in faultin_vma_page_range()
1605 struct vm_area_struct *vma = NULL; in __mm_populate() local
1619 vma = find_vma(mm, nstart); in __mm_populate()
1620 } else if (nstart >= vma->vm_end) in __mm_populate()
1621 vma = vma->vm_next; in __mm_populate()
1622 if (!vma || vma->vm_start >= end) in __mm_populate()
1628 nend = min(end, vma->vm_end); in __mm_populate()
1629 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in __mm_populate()
1631 if (nstart < vma->vm_start) in __mm_populate()
1632 nstart = vma->vm_start; in __mm_populate()
1638 ret = populate_vma_page_range(vma, nstart, nend, &locked); in __mm_populate()
1659 struct vm_area_struct *vma; in __get_user_pages_locked() local
1672 vma = find_vma(mm, start); in __get_user_pages_locked()
1673 if (!vma) in __get_user_pages_locked()
1677 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages_locked()
1678 !(vm_flags & vma->vm_flags)) in __get_user_pages_locked()
1687 vmas[i] = vma; in __get_user_pages_locked()
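
populate_vma_page_range(), faultin_vma_page_range() and __mm_populate() make up the pre-faulting side: mlock(), mmap() with MAP_POPULATE and madvise(MADV_POPULATE_READ/WRITE) end up walking the requested range VMA by VMA and feeding each piece to __get_user_pages() so the pages are faulted in (and, for mlock, kept resident) up front. The effect is easiest to see from userspace; the program below is an illustration of two of those entry points, not kernel code.

    /* Userspace illustration: both MAP_POPULATE and mlock() reach
     * __mm_populate(), which loops over VMAs calling populate_vma_page_range(). */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;

            /* MAP_POPULATE pre-faults the whole mapping at mmap() time */
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            /* mlock() faults in and locks the range so it stays resident */
            if (mlock(buf, len) != 0)
                    perror("mlock");

            memset(buf, 0xab, len);

            munlock(buf, len);
            munmap(buf, len);
            return 0;
    }
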