Lines matching refs: tmp
2261 struct vm_area_struct *tmp; in find_vma() local
2263 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb); in find_vma()
2265 if (tmp->vm_end > addr) { in find_vma()
2266 vma = tmp; in find_vma()
2267 if (tmp->vm_start <= addr) in find_vma()
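
These first hits fall inside find_vma(), which appears to come from mm/mmap.c of this kernel era: it descends the mm's red-black tree and returns the first VMA whose vm_end lies above addr, which may be a VMA that does not actually contain addr. Below is a minimal user-space sketch of that candidate-tracking search, assuming a simplified struct kept in a sorted array instead of the kernel's rb-tree; only the names vm_start, vm_end, and tmp mirror the listing, everything else is invented for illustration.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct vm_area_struct. */
struct vma {
	unsigned long vm_start;		/* inclusive */
	unsigned long vm_end;		/* exclusive */
};

/*
 * Return the first vma with vm_end > addr, or NULL if none exists.
 * Same candidate-tracking logic as the find_vma() lines above, but as
 * a binary search over a sorted array instead of an rb-tree descent.
 */
static struct vma *find_vma_sketch(struct vma *v, size_t n, unsigned long addr)
{
	struct vma *found = NULL;
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		struct vma *tmp = &v[mid];

		if (tmp->vm_end > addr) {
			found = tmp;		/* best candidate so far */
			if (tmp->vm_start <= addr)
				break;		/* addr lies inside tmp */
			hi = mid;		/* try to find an earlier vma */
		} else {
			lo = mid + 1;		/* these all end at or below addr */
		}
	}
	return found;
}

int main(void)
{
	struct vma map[] = { { 0x1000, 0x3000 }, { 0x5000, 0x8000 } };
	struct vma *v = find_vma_sketch(map, 2, 0x4000);

	if (v)
		printf("first vma ending above 0x4000: [%#lx, %#lx)\n",
		       v->vm_start, v->vm_end);
	return 0;
}

The break on tmp->vm_start <= addr corresponds to the exact-containment case in the listing; otherwise the search keeps moving toward lower addresses while remembering the best candidate found so far.
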
2831 struct vm_area_struct *tmp = vma; in __do_munmap() local
2832 while (tmp && tmp->vm_start < end) { in __do_munmap()
2833 if (tmp->vm_flags & VM_LOCKED) { in __do_munmap()
2834 mm->locked_vm -= vma_pages(tmp); in __do_munmap()
2835 munlock_vma_pages_all(tmp); in __do_munmap()
2838 tmp = tmp->vm_next; in __do_munmap()
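
The __do_munmap() hits are the accounting pass taken before the VMAs are detached: every VM_LOCKED VMA that starts below the unmap end has its pages munlocked and its page count subtracted from mm->locked_vm. Here is a self-contained sketch of that walk, assuming a toy singly linked VMA list, a reimplemented vma_pages(), and a print stub standing in for munlock_vma_pages_all(); it is an illustration of the loop's shape, not the kernel's code.

#include <stdio.h>

#define VM_LOCKED	0x2000UL	/* same bit value as the kernel flag */
#define PAGE_SHIFT	12

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_next;
};

static unsigned long vma_pages(const struct vma *v)
{
	return (v->vm_end - v->vm_start) >> PAGE_SHIFT;
}

/* Print stub standing in for munlock_vma_pages_all(). */
static void munlock_all(const struct vma *v)
{
	printf("munlock [%#lx, %#lx)\n", v->vm_start, v->vm_end);
}

/*
 * Mirror of the loop above: starting from the first affected vma, every
 * locked vma that begins below 'end' is munlocked and its pages are
 * dropped from the locked_vm counter.
 */
static void fixup_locked_vm(struct vma *vma, unsigned long end,
			    unsigned long *locked_vm)
{
	struct vma *tmp = vma;

	while (tmp && tmp->vm_start < end) {
		if (tmp->vm_flags & VM_LOCKED) {
			*locked_vm -= vma_pages(tmp);
			munlock_all(tmp);
		}
		tmp = tmp->vm_next;
	}
}

int main(void)
{
	struct vma b = { 0x5000, 0x9000, VM_LOCKED, NULL };
	struct vma a = { 0x1000, 0x3000, 0,         &b   };
	unsigned long locked_vm = vma_pages(&b);

	fixup_locked_vm(&a, 0x9000, &locked_vm);
	printf("locked_vm now %lu pages\n", locked_vm);
	return 0;
}
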
2969 struct vm_area_struct *tmp; in SYSCALL_DEFINE5() local
2973 for (tmp = vma; tmp->vm_start >= start + size; in SYSCALL_DEFINE5()
2974 tmp = tmp->vm_next) { in SYSCALL_DEFINE5()
2979 vma_adjust_trans_huge(tmp, start, start + size, 0); in SYSCALL_DEFINE5()
2981 munlock_vma_pages_range(tmp, in SYSCALL_DEFINE5()
2982 max(tmp->vm_start, start), in SYSCALL_DEFINE5()
2983 min(tmp->vm_end, start + size)); in SYSCALL_DEFINE5()
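
These hits come from the remap_file_pages() emulation in SYSCALL_DEFINE5(remap_file_pages, ...): when the original mapping was VM_LOCKED, the over-mapped window [start, start + size) is munlocked VMA by VMA, with each VMA's range clamped to the window via max()/min(). The sketch below models only that clamping; the loop uses the conventional overlap test (tmp->vm_start < start + size), which is a simplification relative to the condition quoted above, and munlock_vma_pages_range()/vma_adjust_trans_huge() are replaced by a single print stub.

#include <stdio.h>

struct vma {
	unsigned long vm_start, vm_end;
	struct vma *vm_next;
};

static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

/* Print stub standing in for munlock_vma_pages_range(). */
static void munlock_range(const struct vma *v, unsigned long s, unsigned long e)
{
	printf("munlock [%#lx, %#lx) inside vma [%#lx, %#lx)\n",
	       s, e, v->vm_start, v->vm_end);
}

/*
 * Munlock only the part of each vma that overlaps [start, start + size):
 * the bounds are clamped with max()/min(), matching the
 * max(tmp->vm_start, start) / min(tmp->vm_end, start + size) pair above.
 */
static void munlock_window(struct vma *vma, unsigned long start,
			   unsigned long size)
{
	struct vma *tmp;

	for (tmp = vma; tmp && tmp->vm_start < start + size; tmp = tmp->vm_next)
		munlock_range(tmp,
			      max_ul(tmp->vm_start, start),
			      min_ul(tmp->vm_end, start + size));
}

int main(void)
{
	struct vma b = { 0x6000, 0xa000, NULL };
	struct vma a = { 0x2000, 0x6000, &b   };

	munlock_window(&a, 0x4000, 0x4000);	/* window [0x4000, 0x8000) */
	return 0;
}
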
3733 unsigned long tmp, free_kbytes; in reserve_mem_notifier() local
3738 tmp = sysctl_user_reserve_kbytes; in reserve_mem_notifier()
3739 if (0 < tmp && tmp < (1UL << 17)) in reserve_mem_notifier()
3743 tmp = sysctl_admin_reserve_kbytes; in reserve_mem_notifier()
3744 if (0 < tmp && tmp < (1UL << 13)) in reserve_mem_notifier()
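
Finally, reserve_mem_notifier() is the memory-hotplug notifier that re-derives vm.user_reserve_kbytes and vm.admin_reserve_kbytes when memory comes online, but only while the operator has left them in the default range: below 1UL << 17 KiB (128 MiB) for the user reserve and 1UL << 13 KiB (8 MiB) for the admin reserve. The sketch below models that MEM_ONLINE branch; the recompute rule (a fraction of free memory, here free/32, clamped to the cap) is an assumption standing in for init_user_reserve()/init_admin_reserve(), not code from the listing.

#include <stdio.h>

/* Caps from the listing: 1UL << 17 KiB = 128 MiB, 1UL << 13 KiB = 8 MiB. */
#define USER_RESERVE_CAP_KB	(1UL << 17)
#define ADMIN_RESERVE_CAP_KB	(1UL << 13)

static unsigned long user_reserve_kb  = USER_RESERVE_CAP_KB;	/* vm.user_reserve_kbytes */
static unsigned long admin_reserve_kb = ADMIN_RESERVE_CAP_KB;	/* vm.admin_reserve_kbytes */

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

/* Assumed recompute rule: a fraction of free memory, clamped to the cap. */
static unsigned long recompute(unsigned long free_kb, unsigned long cap_kb)
{
	return min_ul(free_kb / 32, cap_kb);
}

/*
 * Model of the MEM_ONLINE branch: each reserve is re-derived only while
 * its current value is still in the default range (0 < tmp < cap), i.e.
 * the operator has not raised it by hand.
 */
static void mem_online(unsigned long free_kb)
{
	unsigned long tmp;

	tmp = user_reserve_kb;
	if (0 < tmp && tmp < USER_RESERVE_CAP_KB)
		user_reserve_kb = recompute(free_kb, USER_RESERVE_CAP_KB);

	tmp = admin_reserve_kb;
	if (0 < tmp && tmp < ADMIN_RESERVE_CAP_KB)
		admin_reserve_kb = recompute(free_kb, ADMIN_RESERVE_CAP_KB);
}

int main(void)
{
	user_reserve_kb  = 65536;	/* small machine: below the cap, re-derived */
	admin_reserve_kb = 16384;	/* raised by the operator: left untouched */

	mem_online(4UL << 20);		/* 4 GiB of newly free memory, in KiB */
	printf("user=%lu KiB admin=%lu KiB\n", user_reserve_kb, admin_reserve_kb);
	return 0;
}
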