Lines matching refs: vma (identifier cross-reference; all hits below are in the kernel's mm/mempolicy.c)
379 struct vm_area_struct *vma; in mpol_rebind_mm() local
382 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm()
383 mpol_rebind_policy(vma->vm_policy, new); in mpol_rebind_mm()
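These first hits are mpol_rebind_mm() walking the whole address space: every VMA on the mm->mmap chain (this is a pre-maple-tree kernel, so VMAs form a singly linked list) gets its per-VMA policy rebound to the new nodemask. A minimal sketch of that traversal pattern, using simplified stand-in types rather than the kernel's definitions:

/* Stand-in types; locking and refcounting elided. */
struct mempolicy;
struct vm_area_struct {
        struct vm_area_struct *vm_next;  /* next VMA on the mm->mmap chain */
        struct mempolicy *vm_policy;     /* per-VMA NUMA policy, may be NULL */
};
struct mm_struct { struct vm_area_struct *mmap; };

static void rebind_all(struct mm_struct *mm,
                       void (*rebind)(struct mempolicy *))
{
        struct vm_area_struct *vma;

        /* Same shape as lines 379-383: visit every VMA in the process. */
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                rebind(vma->vm_policy);  /* mpol_rebind_policy() in the source */
}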
452 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); in queue_pages_pmd()
473 if (!vma_migratable(walk->vma)) { in queue_pages_pmd()
494 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() local
502 ptl = pmd_trans_huge_lock(pmd, vma); in queue_pages_pte_range()
518 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range()
546 if (!vma_migratable(vma)) in queue_pages_pte_range()
568 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
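Lines 452-568 are the walk_page_range() callbacks behind mbind()/migrate_pages(): a transparent huge PMD is either handled whole under pmd_trans_huge_lock() or split, each PTE is resolved to its struct page with vm_normal_page() (which filters out special mappings), and pages in non-migratable VMAs are skipped. The scan has roughly this shape; a loose sketch with stand-in types, not the kernel's walk API:

/* Stand-in types: walk each PTE in [addr, end), resolve the backing
 * page, and queue it for migration. */
typedef unsigned long pte_t;
struct page;

static void scan_pte_range(pte_t *pte, unsigned long addr, unsigned long end,
                           struct page *(*resolve)(unsigned long, pte_t),
                           void (*queue)(struct page *))
{
        for (; addr < end; pte++, addr += 4096 /* PAGE_SIZE */) {
                struct page *page = resolve(addr, *pte); /* vm_normal_page() */
                if (page)
                        queue(page);
        }
}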
597 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
602 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
609 static unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
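change_prot_numa() (line 597; the second definition at 609 is the !CONFIG_NUMA_BALANCING stub) re-protects the range to PAGE_NONE with change_protection()'s prot_numa argument set, so the next access takes a NUMA hinting fault the balancer can act on. The same remove-permissions-then-catch-the-fault trick can be demonstrated from userspace with mprotect(); an illustration of the idea only, not what the kernel does internally:

#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static char *region;

static void on_fault(int sig, siginfo_t *si, void *uc)
{
        static const char msg[] = "hinting-style fault taken\n";

        (void)sig; (void)si; (void)uc;
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        /* Restore access so the faulting store retries and succeeds.
         * (mprotect() in a handler is fine on Linux, if not POSIX-blessed.) */
        mprotect(region, 4096, PROT_READ | PROT_WRITE);
}

int main(void)
{
        struct sigaction sa = { 0 };

        sa.sa_sigaction = on_fault;
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        region = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        region[0] = 1;
        mprotect(region, 4096, PROT_NONE);  /* analogous to PAGE_NONE above */
        region[0] = 2;                      /* faults once, then completes */
        return 0;
}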
619 struct vm_area_struct *vma = walk->vma; in queue_pages_test_walk() local
621 unsigned long endvma = vma->vm_end; in queue_pages_test_walk()
628 if (!vma_migratable(vma) && in queue_pages_test_walk()
634 if (vma->vm_start > start) in queue_pages_test_walk()
635 start = vma->vm_start; in queue_pages_test_walk()
638 if (!vma->vm_next && vma->vm_end < end) in queue_pages_test_walk()
640 if (qp->prev && qp->prev->vm_end < vma->vm_start) in queue_pages_test_walk()
644 qp->prev = vma; in queue_pages_test_walk()
648 if (!is_vm_hugetlb_page(vma) && in queue_pages_test_walk()
649 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) && in queue_pages_test_walk()
650 !(vma->vm_flags & VM_MIXEDMAP)) in queue_pages_test_walk()
651 change_prot_numa(vma, start, endvma); in queue_pages_test_walk()
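queue_pages_test_walk() (lines 619-651) is the per-VMA predicate for the walk: it clamps the window to the VMA (634-635), uses qp->prev to detect holes in the requested range (638-644), and for the lazy-migration case applies change_prot_numa() only to VMAs that are not hugetlb, have at least one of read/write/exec, and are not VM_MIXEDMAP (648-651). That last test as a standalone helper; the flag values are stand-ins chosen to match the common kernel definitions:

#define VM_READ      0x00000001UL
#define VM_WRITE     0x00000002UL
#define VM_EXEC      0x00000004UL
#define VM_MIXEDMAP  0x10000000UL

static int wants_numa_hinting(unsigned long vm_flags, int is_hugetlb)
{
        return !is_hugetlb &&
               (vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
               !(vm_flags & VM_MIXEDMAP);
}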
694 static int vma_replace_policy(struct vm_area_struct *vma, in vma_replace_policy() argument
702 vma->vm_start, vma->vm_end, vma->vm_pgoff, in vma_replace_policy()
703 vma->vm_ops, vma->vm_file, in vma_replace_policy()
704 vma->vm_ops ? vma->vm_ops->set_policy : NULL); in vma_replace_policy()
710 if (vma->vm_ops && vma->vm_ops->set_policy) { in vma_replace_policy()
711 err = vma->vm_ops->set_policy(vma, new); in vma_replace_policy()
716 old = vma->vm_policy; in vma_replace_policy()
717 vma->vm_policy = new; /* protected by mmap_sem */ in vma_replace_policy()
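vma_replace_policy() (lines 694-717) prefers the mapping's own vm_ops->set_policy() hook, which shared mappings like shmem use to store the policy with the file, and only falls back to swapping vma->vm_policy directly under mmap_sem. The fallback shape as a sketch with stand-in types (the put of the old policy is elided):

struct mempolicy;
struct vm_area_struct;
struct vm_operations_struct {
        int (*set_policy)(struct vm_area_struct *, struct mempolicy *);
};
struct vm_area_struct {
        const struct vm_operations_struct *vm_ops;
        struct mempolicy *vm_policy;
};

static int replace_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        if (vma->vm_ops && vma->vm_ops->set_policy)
                return vma->vm_ops->set_policy(vma, new);  /* e.g. shmem */
        vma->vm_policy = new;   /* caller holds mmap_sem for writing */
        return 0;
}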
732 struct vm_area_struct *vma; in mbind_range() local
738 vma = find_vma(mm, start); in mbind_range()
739 if (!vma || vma->vm_start > start) in mbind_range()
742 prev = vma->vm_prev; in mbind_range()
743 if (start > vma->vm_start) in mbind_range()
744 prev = vma; in mbind_range()
746 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
747 next = vma->vm_next; in mbind_range()
748 vmstart = max(start, vma->vm_start); in mbind_range()
749 vmend = min(end, vma->vm_end); in mbind_range()
751 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
754 pgoff = vma->vm_pgoff + in mbind_range()
755 ((vmstart - vma->vm_start) >> PAGE_SHIFT); in mbind_range()
756 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
757 vma->anon_vma, vma->vm_file, pgoff, in mbind_range()
758 new_pol, vma->vm_userfaultfd_ctx, in mbind_range()
759 vma_get_anon_name(vma)); in mbind_range()
761 vma = prev; in mbind_range()
762 next = vma->vm_next; in mbind_range()
763 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
768 if (vma->vm_start != vmstart) { in mbind_range()
769 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
773 if (vma->vm_end != vmend) { in mbind_range()
774 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
779 err = vma_replace_policy(vma, new_pol); in mbind_range()
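mbind_range() (lines 732-779) is the kernel half of mbind(2): it splits VMAs at the range boundaries (768-774), re-merges neighbors whose attributes become identical (756-759), and installs the policy per VMA via vma_replace_policy(). From userspace the whole sequence is driven by one call; a minimal runnable example, assuming a NUMA-enabled kernel with node 0 online and libnuma's <numaif.h> (build with -lnuma):

#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        unsigned long nodemask = 1UL << 0;      /* node 0 only */

        /* Enters do_mbind() -> mbind_range() for [p, p + len). */
        if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
                  MPOL_MF_MOVE) != 0)
                perror("mbind");
        return 0;
}

If the range covers only part of an existing VMA, the split_vma() calls above are what carve it up; /proc/self/numa_maps then shows the new VMA with a bind:0 policy.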
870 struct vm_area_struct *vma = NULL; in do_get_mempolicy() local
894 vma = find_vma_intersection(mm, addr, addr+1); in do_get_mempolicy()
895 if (!vma) { in do_get_mempolicy()
899 if (vma->vm_ops && vma->vm_ops->get_policy) in do_get_mempolicy()
900 pol = vma->vm_ops->get_policy(vma, addr); in do_get_mempolicy()
902 pol = vma->vm_policy; in do_get_mempolicy()
945 if (vma) in do_get_mempolicy()
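do_get_mempolicy() (lines 870-945) handles the MPOL_F_ADDR case of get_mempolicy(2): find_vma_intersection() locates the VMA containing the address, and the policy comes from the mapping's get_policy() hook when present, else from vma->vm_policy. The userspace counterpart; the oversized mask keeps maxnode above MAX_NUMNODES on kernels that insist on it:

#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        int mode;
        unsigned long nodemask[16] = { 0 };     /* 1024 node bits */

        /* MPOL_F_ADDR drives the find_vma_intersection() path above. */
        if (get_mempolicy(&mode, nodemask, 8 * sizeof(nodemask), p,
                          MPOL_F_ADDR) == 0)
                printf("mode=%d first-word=%#lx\n", mode, nodemask[0]);
        return 0;
}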
1138 struct vm_area_struct *vma; in new_page() local
1141 vma = find_vma(current->mm, start); in new_page()
1142 while (vma) { in new_page()
1143 address = page_address_in_vma(page, vma); in new_page()
1146 vma = vma->vm_next; in new_page()
1150 BUG_ON(!vma); in new_page()
1151 return alloc_huge_page_noerr(vma, address, 1); in new_page()
1155 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, in new_page()
1166 vma, address); in new_page()
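new_page() (lines 1138-1166) is the allocation callback handed to the page migration code: it re-walks the VMA list from the start of the mbind() range until page_address_in_vma() finds where the misplaced page is actually mapped, then allocates the replacement there (alloc_huge_page_noerr() for hugetlb, alloc_hugepage_vma() for THP). A loose sketch of that lookup loop, with stand-in types and a sentinel in place of the source's BUG_ON():

struct page;
struct vm_area_struct { struct vm_area_struct *vm_next; };
#define NO_ADDR (~0UL)

static unsigned long locate(struct vm_area_struct *vma, struct page *page,
                            unsigned long (*addr_in)(struct page *,
                                                     struct vm_area_struct *))
{
        for (; vma; vma = vma->vm_next) {
                unsigned long address = addr_in(page, vma);
                if (address != NO_ADDR)         /* page_address_in_vma() hit */
                        return address;
        }
        return NO_ADDR;  /* the source BUG_ON()s instead: the page must map */
}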
1610 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, in __get_vma_policy() argument
1615 if (vma) { in __get_vma_policy()
1616 if (vma->vm_ops && vma->vm_ops->get_policy) { in __get_vma_policy()
1617 pol = vma->vm_ops->get_policy(vma, addr); in __get_vma_policy()
1618 } else if (vma->vm_policy) { in __get_vma_policy()
1619 pol = vma->vm_policy; in __get_vma_policy()
1647 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, in get_vma_policy() argument
1650 struct mempolicy *pol = __get_vma_policy(vma, addr); in get_vma_policy()
1658 bool vma_policy_mof(struct vm_area_struct *vma) in vma_policy_mof() argument
1662 if (vma->vm_ops && vma->vm_ops->get_policy) { in vma_policy_mof()
1665 pol = vma->vm_ops->get_policy(vma, vma->vm_start); in vma_policy_mof()
1673 pol = vma->vm_policy; in vma_policy_mof()
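Lines 1610-1673 are the policy lookup used by every allocator below: the mapping's get_policy() hook wins, then the per-VMA policy, and get_vma_policy() finally falls back to the task policy or the system default. vma_policy_mof() runs the same chain just to test the migrate-on-fault (MPOL_F_MOF) bit. The three-level fallback as a sketch with stand-in types:

struct mempolicy { int mode; };
struct vm_area_struct;
struct vm_operations_struct {
        struct mempolicy *(*get_policy)(struct vm_area_struct *,
                                        unsigned long);
};
struct vm_area_struct {
        const struct vm_operations_struct *vm_ops;
        struct mempolicy *vm_policy;
};

static struct mempolicy *policy_at(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   struct mempolicy *task_pol,
                                   struct mempolicy *default_pol)
{
        struct mempolicy *pol = NULL;

        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr); /* shmem etc. */
                else
                        pol = vma->vm_policy;
        }
        return pol ? pol : (task_pol ? task_pol : default_pol);
}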
1814 struct vm_area_struct *vma, unsigned long addr, int shift) in interleave_nid() argument
1816 if (vma) { in interleave_nid()
1827 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); in interleave_nid()
1828 off += (addr - vma->vm_start) >> shift; in interleave_nid()
1850 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, in huge_node() argument
1855 *mpol = get_vma_policy(vma, addr); in huge_node()
1859 nid = interleave_nid(*mpol, vma, addr, in huge_node()
1860 huge_page_shift(hstate_vma(vma))); in huge_node()
2003 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2011 pol = get_vma_policy(vma, addr); in alloc_pages_vma()
2016 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
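interleave_nid() (lines 1814-1828) converts an address into a stable page index: the file offset in units of the page size at the given shift, plus the distance into the VMA. huge_node() (1850-1860, shifting by huge_page_shift()) and alloc_pages_vma() (2003-2016, shifting by PAGE_SHIFT + order) both feed that index through the allowed nodemask, so MPOL_INTERLEAVE spreads pages round-robin with no per-allocation state. The arithmetic from lines 1827-1828 as a helper; the real node selection walks to the nth set bit of the nodemask (offset_il_node()), simplified here to a plain modulo:

#define PAGE_SHIFT 12

static unsigned interleave_node(unsigned long vm_pgoff, unsigned long vm_start,
                                unsigned long addr, int shift,
                                unsigned nr_allowed)
{
        unsigned long off = vm_pgoff >> (shift - PAGE_SHIFT);

        off += (addr - vm_start) >> shift;
        return off % nr_allowed;
}

Because the index depends only on the mapping, a given page lands on the same node every time it is faulted, which is what keeps interleaving deterministic.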
2307 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) in mpol_misplaced() argument
2318 pol = get_vma_policy(vma, addr); in mpol_misplaced()
2324 pgoff = vma->vm_pgoff; in mpol_misplaced()
2325 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; in mpol_misplaced()
2542 struct vm_area_struct *vma, struct mempolicy *npol) in mpol_set_shared_policy() argument
2546 unsigned long sz = vma_pages(vma); in mpol_set_shared_policy()
2549 vma->vm_pgoff, in mpol_set_shared_policy()
2555 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); in mpol_set_shared_policy()
2559 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); in mpol_set_shared_policy()
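mpol_set_shared_policy() (lines 2542-2559) is where shared mappings diverge from the per-VMA scheme: the policy is stored against the file's page offsets, [vm_pgoff, vm_pgoff + vma_pages(vma)), so every process mapping that range of the object sees it. mbind() on a MAP_SHARED tmpfs mapping reaches this path through shmem's vm_ops->set_policy() rather than touching vma->vm_policy; a runnable probe, assuming <numaif.h>, memfd_create() (glibc >= 2.27), and node 0 online:

#define _GNU_SOURCE
#include <numaif.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 1 << 20;
        int fd = memfd_create("shmpol", 0);
        unsigned long nodemask = 1UL << 0;      /* node 0 */
        void *p;

        ftruncate(fd, len);
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* shmem: vm_ops->set_policy() -> mpol_set_shared_policy(). */
        return mbind(p, len, MPOL_BIND, &nodemask,
                     8 * sizeof(nodemask), MPOL_MF_MOVE) ? 1 : 0;
}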