Lines matching refs: vma (Linux kernel, mm/mremap.c)
The cross-reference below lists every line of mm/mremap.c that mentions vma, grouped by function. The line numbers correspond to a kernel of roughly the v3.17 to v4.4 era (VM_BUG_ON_VMA() is already present, split_huge_page_pmd() has not yet been removed); exact numbers drift between versions.
52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
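The single match in alloc_new_pmd() is the vma parameter itself; the body only walks mm's page tables, allocating the intermediate levels on demand. A minimal sketch, assuming the pre-p4d three-level helpers of this era:

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;

        /* Walk down from the pgd, allocating a pud and pmd if missing. */
        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        return pmd_alloc(mm, pud, addr);        /* NULL on allocation failure */
}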
90 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
97 struct mm_struct *mm = vma->vm_mm; in move_ptes()
120 if (vma->vm_file) { in move_ptes()
121 mapping = vma->vm_file->f_mapping; in move_ptes()
124 if (vma->anon_vma) { in move_ptes()
125 anon_vma = vma->anon_vma; in move_ptes()
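The move_ptes() matches at 120-125 are the rmap locking dance: before any ptes move, the file mapping's rmap lock and/or the anon_vma lock are taken so that a concurrent rmap walk observes either the old pte or the new one, never neither. Paraphrased below; the exact i_mmap primitive varies by version (i_mmap_mutex in older trees, i_mmap_lock_write() later):

        struct address_space *mapping = NULL;
        struct anon_vma *anon_vma = NULL;

        if (need_rmap_locks) {
                /* File-backed: serialize against rmap walks of the mapping. */
                if (vma->vm_file) {
                        mapping = vma->vm_file->f_mapping;
                        i_mmap_lock_write(mapping);
                }
                /* Anonymous: the anon_vma lock gives the same guarantee. */
                if (vma->anon_vma) {
                        anon_vma = vma->anon_vma;
                        anon_vma_lock_write(anon_vma);
                }
        }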
164 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
176 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
180 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
189 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
192 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
198 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, in move_page_tables()
199 vma); in move_page_tables()
202 anon_vma_lock_write(vma->anon_vma); in move_page_tables()
203 err = move_huge_pmd(vma, new_vma, old_addr, in move_page_tables()
207 anon_vma_unlock_write(vma->anon_vma); in move_page_tables()
213 split_huge_page_pmd(vma, old_addr, old_pmd); in move_page_tables()
225 move_ptes(vma, old_pmd, old_addr, old_addr + extent, in move_page_tables()
230 flush_tlb_range(vma, old_end-len, old_addr); in move_page_tables()
232 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
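The matches at 164-232 trace the skeleton of move_page_tables(): flush caches for the old range, open an MMU-notifier invalidation window, walk the range one pmd at a time moving either a whole huge pmd or individual ptes, then flush the TLB once at the end. A condensed paraphrase (the THP handling at 198-213 is reduced to a comment, and the clamping of extent against the destination pmd boundary is elided):

unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
{
        unsigned long extent, next, old_end = old_addr + len;

        flush_cache_range(vma, old_addr, old_end);
        mmu_notifier_invalidate_range_start(vma->vm_mm, old_addr, old_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                pmd_t *old_pmd, *new_pmd;

                /* Stop this step at the next pmd boundary (simplified). */
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                extent = min(next - old_addr, old_end - old_addr);

                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;       /* nothing mapped in this pmd */
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;

                /* Real code: move or split a huge pmd here (see 198-213). */

                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                          new_vma, new_pmd, new_addr, need_rmap_locks);
        }
        /* One ranged flush at the end (real code skips it if nothing moved). */
        flush_tlb_range(vma, old_end - len, old_addr);
        mmu_notifier_invalidate_range_end(vma->vm_mm, old_addr, old_end);

        return len + old_addr - old_end;        /* how much was moved */
}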
237 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
241 struct mm_struct *mm = vma->vm_mm; in move_vma()
243 unsigned long vm_flags = vma->vm_flags; in move_vma()
266 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
271 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
272 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
277 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
285 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
287 vma = new_vma; in move_vma()
295 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
296 excess = vma->vm_end - vma->vm_start - old_len; in move_vma()
297 if (old_addr > vma->vm_start && in move_vma()
298 old_addr + old_len < vma->vm_end) in move_vma()
312 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); in move_vma()
323 vma->vm_flags |= VM_ACCOUNT; in move_vma()
325 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
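move_vma() (237-325) is the relocation driver: break any KSM merging, clone the VMA at the destination with copy_vma(), move the page tables, and roll the move back if it only partly succeeded; the VM_ACCOUNT and statistics juggling at 295-325 then makes the subsequent unmap of the old range charge correctly. The core, paraphrased with that bookkeeping elided:

        /* KSM-merged pages can't follow a move; un-merge them first. */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                          MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                /*
                 * Partial failure: move the entries that did migrate back,
                 * then fall through so the *new* range is unmapped instead
                 * of the old one.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
                                 true);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }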
340 struct vm_area_struct *vma = find_vma(mm, addr); in vma_to_resize() local
342 if (!vma || vma->vm_start > addr) in vma_to_resize()
345 if (is_vm_hugetlb_page(vma)) in vma_to_resize()
349 if (old_len > vma->vm_end - addr) in vma_to_resize()
356 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
358 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
359 pgoff += vma->vm_pgoff; in vma_to_resize()
364 if (vma->vm_flags & VM_LOCKED) { in vma_to_resize()
376 if (vma->vm_flags & VM_ACCOUNT) { in vma_to_resize()
383 return vma; in vma_to_resize()
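vma_to_resize() (340-383) is the validation helper shared by both mremap paths: the old range must sit inside one ordinary VMA, and growth is refused for VM_DONTEXPAND/VM_PFNMAP mappings or when the file offset would wrap. A paraphrase of its checks (the *p argument receives the page count charged when VM_ACCOUNT is set):

static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (!vma || vma->vm_start > addr)       /* no VMA covers addr */
                return ERR_PTR(-EFAULT);

        if (is_vm_hugetlb_page(vma))            /* hugetlb can't be mremap'd */
                return ERR_PTR(-EINVAL);

        /* The old range must lie entirely inside this single VMA. */
        if (old_len > vma->vm_end - addr)
                return ERR_PTR(-EFAULT);

        if (new_len > old_len) {
                unsigned long pgoff;

                if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                        return ERR_PTR(-EFAULT);

                /* Refuse growth that would wrap the file offset. */
                pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
                pgoff += vma->vm_pgoff;
                if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
                        return ERR_PTR(-EINVAL);
        }

        /* RLIMIT_MEMLOCK and VM_ACCOUNT charging (364-383) elided. */
        return vma;
}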
399 struct vm_area_struct *vma; in mremap_to() local
430 vma = vma_to_resize(addr, old_len, new_len, &charged); in mremap_to()
431 if (IS_ERR(vma)) { in mremap_to()
432 ret = PTR_ERR(vma); in mremap_to()
437 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
440 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
441 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
446 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); in mremap_to()
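mremap_to() (399-446) is the MREMAP_FIXED path: it reuses vma_to_resize(), then asks get_unmapped_area() to vet new_addr with a file offset consistent with where the move starts inside the old VMA, before handing off to move_vma(). The address-selection step, paraphrased:

        unsigned long map_flags = MAP_FIXED;

        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        /*
         * Let the architecture/driver vet the fixed destination, using the
         * pgoff the moved region would have in the file.
         */
        ret = get_unmapped_area(vma->vm_file, new_addr, new_len,
                                vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (ret & ~PAGE_MASK)   /* an error code, not an address */
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);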
456 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
458 unsigned long end = vma->vm_end + delta; in vma_expandable()
459 if (end < vma->vm_end) /* overflow */ in vma_expandable()
461 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ in vma_expandable()
463 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
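vma_expandable() (456-463) appears nearly whole in the listing; only its return statements contain no vma and so are omitted. Filled in for context, as the function reads in trees of this era:

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;

        if (end < vma->vm_end)                          /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end)       /* intersection */
                return 0;
        /* The enlarged range must also pass get_unmapped_area(). */
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}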
481 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
530 vma = vma_to_resize(addr, old_len, new_len, &charged); in SYSCALL_DEFINE5()
531 if (IS_ERR(vma)) { in SYSCALL_DEFINE5()
532 ret = PTR_ERR(vma); in SYSCALL_DEFINE5()
538 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
540 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
543 if (vma_adjust(vma, vma->vm_start, addr + new_len, in SYSCALL_DEFINE5()
544 vma->vm_pgoff, NULL)) { in SYSCALL_DEFINE5()
549 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); in SYSCALL_DEFINE5()
550 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
567 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
570 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
571 vma->vm_pgoff + in SYSCALL_DEFINE5()
572 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
579 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); in SYSCALL_DEFINE5()
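Finally, for orientation, a hypothetical userspace program (not part of the listing) that exercises these paths: growing an anonymous mapping either expands in place, when vma_expandable() finds room after the VMA and vma_adjust() stretches it, or relocates through move_vma()/move_page_tables() because MREMAP_MAYMOVE is set:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;

        /* Anonymous mapping; mremap() enters SYSCALL_DEFINE5(mremap, ...). */
        char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "hello");

        /*
         * Grow the mapping. In-place expansion needs no page-table work;
         * a move copies the page tables, so the contents follow for free.
         */
        char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;

        printf("\"%s\" now at %p (was %p)\n", q, (void *)q, (void *)p);
        munmap(q, new_len);
        return 0;
}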