Lines matching refs:tmp (all hits are in dup_mmap()):
484 struct vm_area_struct *mpnt, *tmp, *prev, **pprev, *last = NULL; in dup_mmap() local
544 tmp = vm_area_dup(mpnt); in dup_mmap()
545 if (!tmp) in dup_mmap()
547 retval = vma_dup_policy(mpnt, tmp); in dup_mmap()
550 tmp->vm_mm = mm; in dup_mmap()
551 retval = dup_userfaultfd(tmp, &uf); in dup_mmap()
554 if (tmp->vm_flags & VM_WIPEONFORK) { in dup_mmap()
560 tmp->anon_vma = NULL; in dup_mmap()
561 } else if (anon_vma_fork(tmp, mpnt)) in dup_mmap()
563 tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); in dup_mmap()
564 file = tmp->vm_file; in dup_mmap()
570 if (tmp->vm_flags & VM_DENYWRITE) in dup_mmap()
573 if (tmp->vm_flags & VM_SHARED) in dup_mmap()
577 vma_interval_tree_insert_after(tmp, mpnt, in dup_mmap()
588 if (is_vm_hugetlb_page(tmp)) in dup_mmap()
589 reset_vma_resv_huge_pages(tmp); in dup_mmap()
594 *pprev = tmp; in dup_mmap()
595 pprev = &tmp->vm_next; in dup_mmap()
596 tmp->vm_prev = prev; in dup_mmap()
597 prev = tmp; in dup_mmap()
599 __vma_link_rb(mm, tmp, rb_link, rb_parent); in dup_mmap()
600 rb_link = &tmp->vm_rb.rb_right; in dup_mmap()
601 rb_parent = &tmp->vm_rb; in dup_mmap()
604 if (!(tmp->vm_flags & VM_WIPEONFORK)) { in dup_mmap()
614 retval = copy_page_range(tmp, mpnt); in dup_mmap()
617 if (tmp->vm_ops && tmp->vm_ops->open) in dup_mmap()
618 tmp->vm_ops->open(tmp); in dup_mmap()
650 mpol_put(vma_policy(tmp)); in dup_mmap()
652 vm_area_free(tmp); in dup_mmap()
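
Taken together, these hits trace `tmp` through dup_mmap()'s per-VMA copy loop:
duplicate the parent VMA, re-point it at the child mm, fix up the anon_vma and
file mapping, link it into the child's VMA list and rbtree, and finally copy
the page tables. The sketch below reassembles that flow in reading order. It is
a simplified reconstruction, not the verbatim source: the loop header, the
surrounding declarations (file, retval, uf, rb_link, rb_parent), the i_mmap
locking and dcache flushes around the interval-tree insert, and the error-label
names are assumptions filled in from mainline kernel/fork.c and may differ in
the tree these line numbers were taken from (the extra `last` pointer in the
484 declaration is tree-specific and not used here).

    /*
     * Simplified sketch of dup_mmap()'s per-VMA loop, keyed to the
     * refs:tmp hits above.  Locking, accounting and tree-specific
     * statements are elided.
     */
    for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
            struct file *file;

            tmp = vm_area_dup(mpnt);                /* 544: clone the parent VMA */
            if (!tmp)
                    goto fail_nomem;
            retval = vma_dup_policy(mpnt, tmp);     /* 547: take a ref on the NUMA policy */
            if (retval)
                    goto fail_nomem_policy;
            tmp->vm_mm = mm;                        /* 550: the copy belongs to the child */
            retval = dup_userfaultfd(tmp, &uf);     /* 551 */
            if (retval)
                    goto fail_nomem_anon_vma_fork;

            if (tmp->vm_flags & VM_WIPEONFORK) {
                    /* 554-560: wipe-on-fork VMAs start with no anon_vma;
                     * pages are not copied, the child faults in fresh ones. */
                    tmp->anon_vma = NULL;
            } else if (anon_vma_fork(tmp, mpnt))    /* 561: chain the child's anon_vma */
                    goto fail_nomem_anon_vma_fork;

            tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT); /* 563: mlock is not inherited */

            file = tmp->vm_file;                    /* 564 */
            if (file) {
                    get_file(file);
                    /* 570/573: drop write access for VM_DENYWRITE, allow
                     * writable mappings for VM_SHARED (i_mmap lock held). */
                    vma_interval_tree_insert_after(tmp, mpnt,       /* 577 */
                                    &file->f_mapping->i_mmap);
            }

            if (is_vm_hugetlb_page(tmp))            /* 588 */
                    reset_vma_resv_huge_pages(tmp); /* 589: child gets no hugepage reserves */

            /* 594-597: append tmp to the child's VMA list (vm_next/vm_prev) */
            *pprev = tmp;
            pprev = &tmp->vm_next;
            tmp->vm_prev = prev;
            prev = tmp;

            /* 599-601: insert tmp as the rightmost node of the child's rbtree */
            __vma_link_rb(mm, tmp, rb_link, rb_parent);
            rb_link = &tmp->vm_rb.rb_right;
            rb_parent = &tmp->vm_rb;

            /* 604/614: page tables are copied only when not VM_WIPEONFORK */
            if (!(tmp->vm_flags & VM_WIPEONFORK))
                    retval = copy_page_range(tmp, mpnt);

            if (tmp->vm_ops && tmp->vm_ops->open)   /* 617 */
                    tmp->vm_ops->open(tmp);         /* 618 */

            if (retval)
                    goto out;
    }
    ...
    /* Error unwinding, matching the 650/652 hits: undo in reverse order. */
    fail_nomem_anon_vma_fork:
            mpol_put(vma_policy(tmp));              /* 650 */
    fail_nomem_policy:
            vm_area_free(tmp);                      /* 652 */
    fail_nomem:
            retval = -ENOMEM;

Note how VM_WIPEONFORK short-circuits both anon_vma_fork() (560/561) and
copy_page_range() (604/614): a MADV_WIPEONFORK mapping is linked into the child
like any other VMA, but its contents are not copied, so the child sees
zero-filled pages on first touch.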