
Lines matching refs:vma (identifier cross-references for "vma" in mm/mempolicy.c; the leading number is the source line, and "local"/"argument" mark declarations and function parameters)

451 struct vm_area_struct *vma; in mpol_rebind_mm() local
454 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm()
455 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); in mpol_rebind_mm()
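Lines 451-455 above are the whole of mpol_rebind_mm()'s vma work: when the task's allowed node set changes (a cpuset update), every vma policy in the mm is rebound to the new nodemask. A minimal reconstruction of the surrounding function, assuming the ~3.x/4.x kernel these line numbers match (mm->mmap singly-linked vma list, mmap_sem):

	void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
	{
		struct vm_area_struct *vma;

		down_write(&mm->mmap_sem);
		/* walk every vma in the address space */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
		up_write(&mm->mmap_sem);
	}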
484 static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in queue_pages_pte_range() argument
493 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in queue_pages_pte_range()
500 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range()
522 static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma, in queue_pages_hugetlb_pmd_range() argument
532 ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd); in queue_pages_hugetlb_pmd_range()
551 static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud, in queue_pages_pmd_range() argument
564 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) { in queue_pages_pmd_range()
565 queue_pages_hugetlb_pmd_range(vma, pmd, nodes, in queue_pages_pmd_range()
569 split_huge_page_pmd(vma, addr, pmd); in queue_pages_pmd_range()
572 if (queue_pages_pte_range(vma, pmd, addr, next, nodes, in queue_pages_pmd_range()
579 static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd, in queue_pages_pud_range() argument
590 if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) in queue_pages_pud_range()
594 if (queue_pages_pmd_range(vma, pud, addr, next, nodes, in queue_pages_pud_range()
601 static inline int queue_pages_pgd_range(struct vm_area_struct *vma, in queue_pages_pgd_range() argument
609 pgd = pgd_offset(vma->vm_mm, addr); in queue_pages_pgd_range()
614 if (queue_pages_pud_range(vma, pgd, addr, next, nodes, in queue_pages_pgd_range()
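Lines 484-614 are a textbook four-level page-table walk: queue_pages_pgd_range() iterates pgd entries (line 609) and descends through the pud and pmd levels to queue_pages_pte_range(), which takes the pte lock (line 493) and inspects each normal page (line 500). The pmd level carries the special cases: hugetlb pmds are routed to queue_pages_hugetlb_pmd_range() and transparent huge pages are split first. A sketch of that level, reconstructed from the matched lines with error handling trimmed:

	static int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			unsigned long addr, unsigned long end,
			const nodemask_t *nodes, unsigned long flags,
			void *private)
	{
		pmd_t *pmd;
		unsigned long next;

		pmd = pmd_offset(pud, addr);
		do {
			next = pmd_addr_end(addr, end);
			if (!pmd_present(*pmd))
				continue;
			if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
				queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
							      flags, private);
				continue;
			}
			split_huge_page_pmd(vma, addr, pmd);	/* THP -> ptes */
			if (pmd_none_or_trans_huge_or_clear_bad(pmd))
				continue;
			if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
						  flags, private))
				return -EIO;
		} while (pmd++, addr = next, addr != end);
		return 0;
	}

The pud and pgd levels have the same do/while shape, built on pud_addr_end()/pgd_addr_end() and their own none/bad checks.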
631 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
636 nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1); in change_prot_numa()
643 static unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
662 struct vm_area_struct *vma, *prev; in queue_pages_range() local
664 vma = find_vma(mm, start); in queue_pages_range()
665 if (!vma) in queue_pages_range()
668 for (; vma && vma->vm_start < end; vma = vma->vm_next) { in queue_pages_range()
669 unsigned long endvma = vma->vm_end; in queue_pages_range()
673 if (vma->vm_start > start) in queue_pages_range()
674 start = vma->vm_start; in queue_pages_range()
677 if (!vma->vm_next && vma->vm_end < end) in queue_pages_range()
679 if (prev && prev->vm_end < vma->vm_start) in queue_pages_range()
685 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) in queue_pages_range()
686 change_prot_numa(vma, start, endvma); in queue_pages_range()
692 vma_migratable(vma))) { in queue_pages_range()
694 err = queue_pages_pgd_range(vma, start, endvma, nodes, in queue_pages_range()
700 prev = vma; in queue_pages_range()
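queue_pages_range() (lines 662-700) drives that walk: it iterates the vma list across [start, end), rejects holes unless MPOL_MF_DISCONTIG_OK is set (lines 677-679), and for MPOL_MF_LAZY only marks the ptes for later NUMA faulting via change_prot_numa() (lines 631-686) instead of queueing pages. A reconstruction with the flag plumbing condensed:

	static int queue_pages_range(struct mm_struct *mm, unsigned long start,
			unsigned long end, const nodemask_t *nodes,
			unsigned long flags, void *private)
	{
		struct vm_area_struct *vma, *prev = NULL;
		int err;

		vma = find_vma(mm, start);
		if (!vma)
			return -EFAULT;

		for (; vma && vma->vm_start < end; vma = vma->vm_next) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;

			if (!(flags & MPOL_MF_DISCONTIG_OK)) {
				/* the range must be fully covered by vmas */
				if (!vma->vm_next && vma->vm_end < end)
					return -EFAULT;
				if (prev && prev->vm_end < vma->vm_start)
					return -EFAULT;
			}

			if (flags & MPOL_MF_LAZY) {
				/* skip vmas that can never fault */
				if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
					change_prot_numa(vma, start, endvma);
				goto next;
			}

			if ((flags & MPOL_MF_STRICT) ||
			    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
			     vma_migratable(vma))) {
				err = queue_pages_pgd_range(vma, start, endvma,
							    nodes, flags, private);
				if (err)
					return err;
			}
	next:
			prev = vma;
		}
		return 0;
	}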
709 static int vma_replace_policy(struct vm_area_struct *vma, in vma_replace_policy() argument
717 vma->vm_start, vma->vm_end, vma->vm_pgoff, in vma_replace_policy()
718 vma->vm_ops, vma->vm_file, in vma_replace_policy()
719 vma->vm_ops ? vma->vm_ops->set_policy : NULL); in vma_replace_policy()
725 if (vma->vm_ops && vma->vm_ops->set_policy) { in vma_replace_policy()
726 err = vma->vm_ops->set_policy(vma, new); in vma_replace_policy()
731 old = vma->vm_policy; in vma_replace_policy()
732 vma->vm_policy = new; /* protected by mmap_sem */ in vma_replace_policy()
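vma_replace_policy() (lines 709-732) shows the two ownership models for a policy: objects with a vm_ops->set_policy hook (shmem, for instance) store it themselves, everything else swaps vma->vm_policy directly, relying on the caller holding mmap_sem for writing. A sketch with the pr_debug() of lines 717-719 dropped:

	static int vma_replace_policy(struct vm_area_struct *vma,
				      struct mempolicy *pol)
	{
		struct mempolicy *old, *new;
		int err;

		new = mpol_dup(pol);	/* each vma holds its own reference */
		if (IS_ERR(new))
			return PTR_ERR(new);

		if (vma->vm_ops && vma->vm_ops->set_policy) {
			err = vma->vm_ops->set_policy(vma, new);
			if (err) {
				mpol_put(new);
				return err;
			}
		}

		old = vma->vm_policy;
		vma->vm_policy = new;	/* protected by mmap_sem */
		mpol_put(old);
		return 0;
	}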
747 struct vm_area_struct *vma; in mbind_range() local
753 vma = find_vma(mm, start); in mbind_range()
754 if (!vma || vma->vm_start > start) in mbind_range()
757 prev = vma->vm_prev; in mbind_range()
758 if (start > vma->vm_start) in mbind_range()
759 prev = vma; in mbind_range()
761 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
762 next = vma->vm_next; in mbind_range()
763 vmstart = max(start, vma->vm_start); in mbind_range()
764 vmend = min(end, vma->vm_end); in mbind_range()
766 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
769 pgoff = vma->vm_pgoff + in mbind_range()
770 ((vmstart - vma->vm_start) >> PAGE_SHIFT); in mbind_range()
771 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
772 vma->anon_vma, vma->vm_file, pgoff, in mbind_range()
773 new_pol, vma_get_anon_name(vma)); in mbind_range()
775 vma = prev; in mbind_range()
776 next = vma->vm_next; in mbind_range()
777 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
782 if (vma->vm_start != vmstart) { in mbind_range()
783 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
787 if (vma->vm_end != vmend) { in mbind_range()
788 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
793 err = vma_replace_policy(vma, new_pol); in mbind_range()
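mbind_range() (lines 747-793) applies new_pol to every vma overlapping [start, end) with the classic merge-else-split pattern: first try vma_merge() so neighbours with identical attributes coalesce (line 771; note the Android-specific vma_get_anon_name() argument), otherwise split_vma() carves the vma to cover exactly [vmstart, vmend), and finally vma_replace_policy() installs the policy. A reconstruction with error paths condensed:

	static int mbind_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end, struct mempolicy *new_pol)
	{
		struct vm_area_struct *vma, *prev, *next;
		unsigned long vmstart, vmend;
		pgoff_t pgoff;
		int err = 0;

		vma = find_vma(mm, start);
		if (!vma || vma->vm_start > start)
			return -EFAULT;

		prev = vma->vm_prev;
		if (start > vma->vm_start)
			prev = vma;

		for (; vma && vma->vm_start < end; prev = vma, vma = next) {
			next = vma->vm_next;
			vmstart = max(start, vma->vm_start);
			vmend = min(end, vma->vm_end);

			if (mpol_equal(vma_policy(vma), new_pol))
				continue;

			/* 1. try to coalesce with a neighbour under new_pol */
			pgoff = vma->vm_pgoff +
				((vmstart - vma->vm_start) >> PAGE_SHIFT);
			prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
					 vma->anon_vma, vma->vm_file, pgoff,
					 new_pol, vma_get_anon_name(vma));
			if (prev) {
				vma = prev;
				next = vma->vm_next;
				if (mpol_equal(vma_policy(vma), new_pol))
					continue;
				goto replace;
			}
			/* 2. carve the vma down to exactly [vmstart, vmend) */
			if (vma->vm_start != vmstart) {
				err = split_vma(vma->vm_mm, vma, vmstart, 1);
				if (err)
					break;
			}
			if (vma->vm_end != vmend) {
				err = split_vma(vma->vm_mm, vma, vmend, 0);
				if (err)
					break;
			}
	replace:
			/* 3. install the policy on the now-exact vma */
			err = vma_replace_policy(vma, new_pol);
			if (err)
				break;
		}
		return err;
	}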
885 struct vm_area_struct *vma = NULL; in do_get_mempolicy() local
909 vma = find_vma_intersection(mm, addr, addr+1); in do_get_mempolicy()
910 if (!vma) { in do_get_mempolicy()
914 if (vma->vm_ops && vma->vm_ops->get_policy) in do_get_mempolicy()
915 pol = vma->vm_ops->get_policy(vma, addr); in do_get_mempolicy()
917 pol = vma->vm_policy; in do_get_mempolicy()
960 if (vma) in do_get_mempolicy()
1145 struct vm_area_struct *vma; in new_page() local
1148 vma = find_vma(current->mm, start); in new_page()
1149 while (vma) { in new_page()
1150 address = page_address_in_vma(page, vma); in new_page()
1153 vma = vma->vm_next; in new_page()
1157 BUG_ON(!vma); in new_page()
1158 return alloc_huge_page_noerr(vma, address, 1); in new_page()
1163 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in new_page()
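new_page() (lines 1145-1163) is the migration callback used when mbind() with MPOL_MF_MOVE must replace misplaced pages: it re-finds a vma that actually maps the page so the replacement can be allocated under that vma's policy. A sketch with the signature simplified (the real callback takes extra migration arguments):

	static struct page *new_page(struct page *page, unsigned long start)
	{
		struct vm_area_struct *vma;
		unsigned long address = 0;

		vma = find_vma(current->mm, start);
		while (vma) {
			address = page_address_in_vma(page, vma);
			if (address != -EFAULT)
				break;	/* page is mapped by this vma */
			vma = vma->vm_next;
		}

		if (PageHuge(page)) {
			BUG_ON(!vma);	/* hugetlb pages must still be mapped */
			return alloc_huge_page_noerr(vma, address, 1);
		}
		/* !vma is tolerated: alloc_page_vma() then falls back to
		 * the task or system default policy */
		return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	}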
1586 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, in __get_vma_policy() argument
1591 if (vma) { in __get_vma_policy()
1592 if (vma->vm_ops && vma->vm_ops->get_policy) { in __get_vma_policy()
1593 pol = vma->vm_ops->get_policy(vma, addr); in __get_vma_policy()
1594 } else if (vma->vm_policy) { in __get_vma_policy()
1595 pol = vma->vm_policy; in __get_vma_policy()
1623 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, in get_vma_policy() argument
1626 struct mempolicy *pol = __get_vma_policy(vma, addr); in get_vma_policy()
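__get_vma_policy() (lines 1586-1595) encodes the lookup order used throughout the file, including by do_get_mempolicy() above (lines 914-917): the object's get_policy hook wins, then the vma's own vm_policy. A NULL result makes get_vma_policy() (line 1623) fall back to the task policy and ultimately the system default. A sketch of the chain:

	struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
					   unsigned long addr)
	{
		struct mempolicy *pol = NULL;

		if (vma) {
			if (vma->vm_ops && vma->vm_ops->get_policy)
				pol = vma->vm_ops->get_policy(vma, addr);	/* e.g. shmem */
			else if (vma->vm_policy)
				pol = vma->vm_policy;
		}
		return pol;	/* NULL: caller falls back to task/default */
	}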
1634 bool vma_policy_mof(struct vm_area_struct *vma) in vma_policy_mof() argument
1638 if (vma->vm_ops && vma->vm_ops->get_policy) { in vma_policy_mof()
1641 pol = vma->vm_ops->get_policy(vma, vma->vm_start); in vma_policy_mof()
1649 pol = vma->vm_policy; in vma_policy_mof()
1780 struct vm_area_struct *vma, unsigned long off) in offset_il_node() argument
1800 struct vm_area_struct *vma, unsigned long addr, int shift) in interleave_nid() argument
1802 if (vma) { in interleave_nid()
1813 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); in interleave_nid()
1814 off += (addr - vma->vm_start) >> shift; in interleave_nid()
1815 return offset_il_node(pol, vma, off); in interleave_nid()
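interleave_nid() (lines 1800-1815) turns an address into a stable interleave index: the mapping's file offset plus the offset into the vma, both in units of 1 << shift, so the chosen node survives mremap() and is shared by all tasks mapping the same file range. offset_il_node() then maps that index onto the (index mod nnodes)-th allowed node. A combined sketch of both steps; the _sketch name is illustrative and the !vma task-interleave path of line 1802 is omitted:

	static int interleave_nid_sketch(struct mempolicy *pol,
			struct vm_area_struct *vma, unsigned long addr, int shift)
	{
		unsigned int nnodes = nodes_weight(pol->v.nodes);
		unsigned int target;
		unsigned long off;
		int nid;

		if (!nnodes)
			return numa_node_id();

		/* index of this (huge)page within the file/mapping */
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;

		/* offset_il_node(): pick the target-th allowed node */
		target = off % nnodes;
		nid = first_node(pol->v.nodes);
		while (target--)
			nid = next_node(nid, pol->v.nodes);
		return nid;
	}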
1851 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, in huge_zonelist() argument
1857 *mpol = get_vma_policy(vma, addr); in huge_zonelist()
1861 zl = node_zonelist(interleave_nid(*mpol, vma, addr, in huge_zonelist()
1862 huge_page_shift(hstate_vma(vma))), gfp_flags); in huge_zonelist()
2004 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
2012 pol = get_vma_policy(vma, addr); in alloc_pages_vma()
2018 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
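Both callers above consume that nid the same way: huge_zonelist() (lines 1851-1862) scales by huge_page_shift() so hugetlb mappings interleave per huge page, and alloc_pages_vma() (lines 2004-2018) uses PAGE_SHIFT + order so a higher-order allocation interleaves per block rather than per base page. A sketch of the allocation step, assuming the era's node_zonelist()/__alloc_pages() interface (the _sketch name is illustrative):

	static struct page *alloc_interleaved_sketch(gfp_t gfp, int order,
			struct vm_area_struct *vma, unsigned long addr,
			struct mempolicy *pol)
	{
		unsigned int nid = interleave_nid(pol, vma, addr,
						  PAGE_SHIFT + order);

		/* allocate from the chosen node's zonelist */
		return __alloc_pages(gfp, order, node_zonelist(nid, gfp));
	}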
2265 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) in mpol_misplaced() argument
2276 BUG_ON(!vma); in mpol_misplaced()
2278 pol = get_vma_policy(vma, addr); in mpol_misplaced()
2284 BUG_ON(addr >= vma->vm_end); in mpol_misplaced()
2285 BUG_ON(addr < vma->vm_start); in mpol_misplaced()
2287 pgoff = vma->vm_pgoff; in mpol_misplaced()
2288 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; in mpol_misplaced()
2289 polnid = offset_il_node(pol, vma, pgoff); in mpol_misplaced()
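mpol_misplaced() (lines 2265-2289) is the NUMA-balancing hook: for a page caught by a prot_numa fault it recomputes which node the policy would put that address on and compares it with where the page actually sits. For MPOL_INTERLEAVE this is the same pgoff arithmetic as above. A sketch of that arm (the _sketch name is illustrative; other policy modes are omitted):

	static bool page_misplaced_interleave_sketch(struct page *page,
			struct vm_area_struct *vma, unsigned long addr,
			struct mempolicy *pol)
	{
		pgoff_t pgoff = vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT);
		int polnid = offset_il_node(pol, vma, pgoff);

		/* misplaced if the page's node differs from the node the
		 * interleave policy assigns to this offset */
		return page_to_nid(page) != polnid;
	}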
2487 struct vm_area_struct *vma, struct mempolicy *npol) in mpol_set_shared_policy() argument
2491 unsigned long sz = vma_pages(vma); in mpol_set_shared_policy()
2494 vma->vm_pgoff, in mpol_set_shared_policy()
2500 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); in mpol_set_shared_policy()
2504 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); in mpol_set_shared_policy()
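mpol_set_shared_policy() (lines 2487-2504) is the shmem/tmpfs path: shared policies are not stored on the vma at all but in a per-object tree keyed by page offset, so the vma only supplies the [vm_pgoff, vm_pgoff + vma_pages(vma)) window. A reconstruction with the pr_debug() of line 2494 dropped:

	int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
	{
		struct sp_node *new = NULL;
		unsigned long sz = vma_pages(vma);
		int err;

		if (npol) {
			new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
			if (!new)
				return -ENOMEM;
		}
		err = shared_policy_replace(info, vma->vm_pgoff,
					    vma->vm_pgoff + sz, new);
		if (err && new)
			sp_free(new);	/* replace failed: drop unused node */
		return err;
	}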