Lines matching references to vma (from kernel/events/uprobes.c)
121 static bool valid_vma(struct vm_area_struct *vma, bool is_register) in valid_vma() argument
128 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
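
The flags local tested on source line 128 never references vma itself, so the filtered listing hides its definition. From the surrounding source of this era's uprobes.c (quoted from memory, so treat it as an assumption), it is:

        vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;

        if (is_register)
                flags |= VM_WRITE;

Requiring (vma->vm_flags & flags) == VM_MAYEXEC therefore accepts only file-backed mappings that may be executed and are neither hugetlb nor shared; at register time a writable mapping is additionally rejected.
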
131 static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset) in offset_to_vaddr() argument
133 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
136 static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr) in vaddr_to_offset() argument
138 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
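
offset_to_vaddr() and vaddr_to_offset() above are exact inverses: vm_pgoff records, in pages, where in the file the mapping starts, so converting between a file offset and a user address is pure arithmetic. A minimal, runnable userspace sketch of that math (the struct, the field values, and PAGE_SHIFT here are illustrative stand-ins, not kernel code):

        #include <assert.h>
        #include <stdio.h>

        #define PAGE_SHIFT 12

        /* Hypothetical stand-in holding just the two vma fields the helpers use. */
        struct fake_vma {
                unsigned long vm_start;   /* first mapped user address */
                unsigned long vm_pgoff;   /* file offset of vm_start, in pages */
        };

        static unsigned long offset_to_vaddr(struct fake_vma *vma, long long offset)
        {
                return vma->vm_start + offset - ((long long)vma->vm_pgoff << PAGE_SHIFT);
        }

        static long long vaddr_to_offset(struct fake_vma *vma, unsigned long vaddr)
        {
                return ((long long)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
        }

        int main(void)
        {
                /* A mapping of the file starting at file page 2, placed at 0x400000. */
                struct fake_vma vma = { .vm_start = 0x400000, .vm_pgoff = 2 };
                long long off = 0x3456;   /* probe offset within the file */
                unsigned long vaddr = offset_to_vaddr(&vma, off);

                assert(vaddr_to_offset(&vma, vaddr) == off);
                printf("offset %#llx -> vaddr %#lx\n", off, vaddr);
                return 0;
        }
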
154 static int __replace_page(struct vm_area_struct *vma, unsigned long addr, in __replace_page() argument
157 struct mm_struct *mm = vma->vm_mm; in __replace_page()
160 .vma = vma, in __replace_page()
166 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr, in __replace_page()
170 err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL); in __replace_page()
186 page_add_new_anon_rmap(new_page, vma, addr, false); in __replace_page()
187 lru_cache_add_inactive_or_unevictable(new_page, vma); in __replace_page()
197 flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); in __replace_page()
198 ptep_clear_flush_notify(vma, addr, pvmw.pte); in __replace_page()
201 mk_pte(new_page, vma->vm_page_prot)); in __replace_page()
208 if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page)) in __replace_page()
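
__replace_page() swaps the page backing addr for a new anonymous page that carries the breakpoint. The ordering visible above matters: charge the new page to the memcg, announce the invalidation to secondary MMUs, hook the new page into the rmap and LRU machinery, then atomically retarget the PTE. Below is a condensed sketch of that sequence assuming the v5.10-era APIs shown in the listing; the new_page == NULL unmap path, the mlock handling on line 208, and most error paths are elided, so read it as a reconstruction rather than the verbatim function:

        static int replace_page_sketch(struct vm_area_struct *vma, unsigned long addr,
                                       struct page *old_page, struct page *new_page)
        {
                struct mm_struct *mm = vma->vm_mm;
                struct page_vma_mapped_walk pvmw = {
                        .page = compound_head(old_page),
                        .vma = vma,
                        .address = addr,
                };
                struct mmu_notifier_range range;
                int err;

                /* Tell secondary MMUs (KVM etc.) that [addr, addr + PAGE_SIZE) changes. */
                mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
                                        addr + PAGE_SIZE);

                /* Account the anon page to the memcg before it becomes visible. */
                err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
                if (err)
                        return err;

                mmu_notifier_invalidate_range_start(&range);

                /* Find and lock the PTE that maps old_page at addr. */
                if (!page_vma_mapped_walk(&pvmw))
                        goto out;

                get_page(new_page);
                page_add_new_anon_rmap(new_page, vma, addr, false);
                lru_cache_add_inactive_or_unevictable(new_page, vma);

                /* Atomically retarget the PTE: flush, clear, install. */
                flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
                ptep_clear_flush_notify(vma, addr, pvmw.pte);
                set_pte_at_notify(mm, addr, pvmw.pte,
                                  mk_pte(new_page, vma->vm_page_prot));

                page_remove_rmap(old_page, false);
                put_page(old_page);
                page_vma_mapped_walk_done(&pvmw);
        out:
                mmu_notifier_invalidate_range_end(&range);
                return 0;
        }
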
343 struct vm_area_struct *vma) in valid_ref_ctr_vma() argument
345 unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset); in valid_ref_ctr_vma()
348 vma->vm_file && in valid_ref_ctr_vma()
349 file_inode(vma->vm_file) == uprobe->inode && in valid_ref_ctr_vma()
350 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && in valid_ref_ctr_vma()
351 vma->vm_start <= vaddr && in valid_ref_ctr_vma()
352 vma->vm_end > vaddr; in valid_ref_ctr_vma()
372 struct vm_area_struct *vma; in __update_ref_ctr() local
380 FOLL_WRITE, &page, &vma, NULL); in __update_ref_ctr()
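
The reference counter (an SDT-style semaphore) lives at uprobe->ref_ctr_offset inside a writable, non-shared mapping of the probed file, which is exactly what valid_ref_ctr_vma() checks above. __update_ref_ctr() then pins the counter's page with get_user_pages_remote() and adjusts the 16-bit value through a temporary kernel mapping. A condensed sketch, with the overflow check and warning output elided:

        static int update_ref_ctr_sketch(struct mm_struct *mm, unsigned long vaddr,
                                         short d)
        {
                struct vm_area_struct *vma;
                struct page *page;
                void *kaddr;
                short *ptr;
                int ret;

                /* Pin the page holding the counter, faulting it in writable. */
                ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE,
                                            &page, &vma, NULL);
                if (ret <= 0)
                        return ret == 0 ? -EBUSY : ret;

                kaddr = kmap_atomic(page);
                ptr = kaddr + (vaddr & ~PAGE_MASK);
                *ptr += d;              /* +1 on install, -1 on removal */
                kunmap_atomic(kaddr);

                put_page(page);
                return 0;
        }
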
468 struct vm_area_struct *vma; in uprobe_write_opcode() local
481 &old_page, &vma, NULL); in uprobe_write_opcode()
508 ret = anon_vma_prepare(vma); in uprobe_write_opcode()
513 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr); in uprobe_write_opcode()
527 index = vaddr_to_offset(vma, vaddr & PAGE_MASK) >> PAGE_SHIFT; in uprobe_write_opcode()
528 orig_page = find_get_page(vma->vm_file->f_inode->i_mapping, in uprobe_write_opcode()
545 ret = __replace_page(vma, vaddr, old_page, new_page); in uprobe_write_opcode()
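
uprobe_write_opcode() ties the pieces together: pin the code page, make sure the vma has an anon_vma, build a private copy with the breakpoint bytes patched in, and let __replace_page() swap it into the page tables. Lines 527-528 above are the inverse path: on probe removal, vaddr_to_offset() plus a page-cache lookup recover the original file page so the anon copy can be dropped again. A condensed sketch of the install path, assuming the same v5.10-era APIs (verification, retry, and ref-ctr handling elided; copy_to_page() is the uprobes-internal helper that memcpy's into a kmapped page):

        static int write_opcode_sketch(struct mm_struct *mm, unsigned long vaddr,
                                       uprobe_opcode_t opcode)
        {
                struct vm_area_struct *vma;
                struct page *old_page, *new_page;
                int ret;

                /* Pin whatever page is currently mapped at vaddr. */
                ret = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE,
                                            &old_page, &vma, NULL);
                if (ret <= 0)
                        return ret == 0 ? -EBUSY : ret;

                /* The replacement page is anonymous, so an anon_vma is required. */
                ret = anon_vma_prepare(vma);
                if (ret)
                        goto put_old;

                ret = -ENOMEM;
                new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
                if (!new_page)
                        goto put_old;

                /* Copy the old contents, then patch in the breakpoint opcode. */
                copy_highpage(new_page, old_page);
                copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

                ret = __replace_page(vma, vaddr, old_page, new_page);
                put_page(new_page);
        put_old:
                put_page(old_page);
                return ret;
        }
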
898 struct vm_area_struct *vma, unsigned long vaddr) in install_breakpoint() argument
903 ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr); in install_breakpoint()
969 struct vm_area_struct *vma; in build_map_info() local
977 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in build_map_info()
978 if (!valid_vma(vma, is_register)) in build_map_info()
996 if (!mmget_not_zero(vma->vm_mm)) in build_map_info()
1004 info->mm = vma->vm_mm; in build_map_info()
1005 info->vaddr = offset_to_vaddr(vma, offset); in build_map_info()
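
Registration works file-first: starting from the probed inode's address_space, vma_interval_tree_foreach() visits every vma, in every process, that maps the page containing the probe's file offset (pgoff). Each candidate mm is pinned with mmget_not_zero() so it cannot disappear once i_mmap_rwsem is dropped, and the per-mm breakpoint address is precomputed with offset_to_vaddr(). A condensed sketch of the walk, where record() is a hypothetical stand-in for the map_info bookkeeping the real function does (including its allocation-retry dance):

        pgoff_t pgoff = offset >> PAGE_SHIFT;
        struct vm_area_struct *vma;

        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                if (!valid_vma(vma, is_register))
                        continue;

                /* Pin the mm: it must survive until the breakpoint is installed. */
                if (!mmget_not_zero(vma->vm_mm))
                        continue;

                /* Hypothetical helper; the real code fills a map_info node. */
                record(vma->vm_mm, offset_to_vaddr(vma, offset));
        }
        i_mmap_unlock_read(mapping);
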
1052 struct vm_area_struct *vma; in register_for_each_vma() local
1058 vma = find_vma(mm, info->vaddr); in register_for_each_vma()
1059 if (!vma || !valid_vma(vma, is_register) || in register_for_each_vma()
1060 file_inode(vma->vm_file) != uprobe->inode) in register_for_each_vma()
1063 if (vma->vm_start > info->vaddr || in register_for_each_vma()
1064 vaddr_to_offset(vma, info->vaddr) != uprobe->offset) in register_for_each_vma()
1071 err = install_breakpoint(uprobe, mm, vma, info->vaddr); in register_for_each_vma()
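
build_map_info() dropped every lock before this point, so register_for_each_vma() must revalidate under the mm's mmap lock: the vma found at the precomputed address must still be a valid probe target, still map the probed inode, and still place uprobe->offset at exactly that address. Any mismatch means the address space changed in the meantime and the mm is skipped. A condensed fragment of the check (I believe this path takes the write lock in this era, but treat that as an assumption):

        mmap_write_lock(mm);
        vma = find_vma(mm, info->vaddr);
        if (!vma || !valid_vma(vma, is_register) ||
            file_inode(vma->vm_file) != uprobe->inode)
                goto unlock;

        /* find_vma() only guarantees vaddr < vm_end; recheck start and offset. */
        if (vma->vm_start > info->vaddr ||
            vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
                goto unlock;

        if (is_register)
                err = install_breakpoint(uprobe, mm, vma, info->vaddr);
        unlock:
        mmap_write_unlock(mm);
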
1240 struct vm_area_struct *vma; in unapply_uprobe() local
1244 for (vma = mm->mmap; vma; vma = vma->vm_next) { in unapply_uprobe()
1248 if (!valid_vma(vma, false) || in unapply_uprobe()
1249 file_inode(vma->vm_file) != uprobe->inode) in unapply_uprobe()
1252 offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT; in unapply_uprobe()
1254 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
1257 vaddr = offset_to_vaddr(vma, uprobe->offset); in unapply_uprobe()
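
unapply_uprobe() is the per-mm removal path, used when a probe's filter stops matching a task. This era still iterates the mm->mmap linked list (later kernels use the maple tree): skip vmas that cannot hold the probe, range-check uprobe->offset against the file window each vma covers (lines 1252-1255), and remove the breakpoint at the translated address. Condensed, with remove_breakpoint() being the uprobes-internal counterpart of install_breakpoint():

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long vaddr;
                loff_t offset;

                if (!valid_vma(vma, false) ||
                    file_inode(vma->vm_file) != uprobe->inode)
                        continue;

                /* Does this vma's file window contain the probed offset? */
                offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
                if (uprobe->offset < offset ||
                    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
                        continue;

                vaddr = offset_to_vaddr(vma, uprobe->offset);
                err |= remove_breakpoint(uprobe, mm, vaddr);
        }
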
1294 struct vm_area_struct *vma, in build_probe_list() argument
1303 min = vaddr_to_offset(vma, start); in build_probe_list()
1328 static int delayed_ref_ctr_inc(struct vm_area_struct *vma) in delayed_ref_ctr_inc() argument
1339 if (du->mm != vma->vm_mm || in delayed_ref_ctr_inc()
1340 !valid_ref_ctr_vma(du->uprobe, vma)) in delayed_ref_ctr_inc()
1343 vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset); in delayed_ref_ctr_inc()
1344 ret = __update_ref_ctr(vma->vm_mm, vaddr, 1); in delayed_ref_ctr_inc()
1346 update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1); in delayed_ref_ctr_inc()
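
Reference-counter updates can arrive before the counter's page is even mapped, for example when the probe is registered before the target library is loaded. Such updates are parked on delayed_uprobe_list; when a writable file mapping later appears, delayed_ref_ctr_inc() replays every parked entry that belongs to this mm and whose counter now falls inside the new vma. A condensed sketch, assuming the delayed_uprobe machinery of this era's uprobes.c:

        mutex_lock(&delayed_uprobe_lock);
        list_for_each_entry_safe(du, tmp, &delayed_uprobe_list, list) {
                if (du->mm != vma->vm_mm || !valid_ref_ctr_vma(du->uprobe, vma))
                        continue;

                vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
                if (__update_ref_ctr(vma->vm_mm, vaddr, 1))
                        update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);

                delayed_uprobe_delete(du);      /* replayed; drop the parked entry */
        }
        mutex_unlock(&delayed_uprobe_lock);
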
1362 int uprobe_mmap(struct vm_area_struct *vma) in uprobe_mmap() argument
1371 if (vma->vm_file && in uprobe_mmap()
1372 (vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE && in uprobe_mmap()
1373 test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags)) in uprobe_mmap()
1374 delayed_ref_ctr_inc(vma); in uprobe_mmap()
1376 if (!valid_vma(vma, true)) in uprobe_mmap()
1379 inode = file_inode(vma->vm_file); in uprobe_mmap()
1384 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); in uprobe_mmap()
1392 filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) { in uprobe_mmap()
1393 unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); in uprobe_mmap()
1394 install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); in uprobe_mmap()
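
uprobe_mmap() is called for every new file mapping and does two independent jobs: for writable mappings it replays parked ref-ctr updates (lines 1371-1374), and for executable mappings it collects every uprobe registered against the mapped file range via build_probe_list() and installs breakpoints for those whose filters accept this mm. A condensed sketch of the install loop:

        build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);

        list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
                if (!fatal_signal_pending(current) &&
                    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
                        unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);

                        install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
                }
                put_uprobe(uprobe);
        }
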
1404 vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) in vma_has_uprobes() argument
1410 inode = file_inode(vma->vm_file); in vma_has_uprobes()
1412 min = vaddr_to_offset(vma, start); in vma_has_uprobes()
1425 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) in uprobe_munmap() argument
1427 if (no_uprobe_events() || !valid_vma(vma, false)) in uprobe_munmap()
1430 if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ in uprobe_munmap()
1433 if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || in uprobe_munmap()
1434 test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) in uprobe_munmap()
1437 if (vma_has_uprobes(vma, start, end)) in uprobe_munmap()
1438 set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); in uprobe_munmap()
1444 struct vm_area_struct *vma; in xol_add_vma() local
1465 vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE, in xol_add_vma()
1468 if (IS_ERR(vma)) { in xol_add_vma()
1469 ret = PTR_ERR(vma); in xol_add_vma()
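
The XOL ("execute out of line") area is the per-mm page where the displaced original instruction is single-stepped after the breakpoint fires. xol_add_vma() maps it into the tracee as a one-page special mapping; VM_DONTCOPY keeps fork() from duplicating the slots. A sketch of the call, with the flag set and the xol_mapping member quoted from memory of this era's source, so treat both as assumptions:

        vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
                                       VM_EXEC | VM_MAYEXEC | VM_DONTCOPY | VM_IO,
                                       &area->xol_mapping);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto fail;
        }
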
1992 struct vm_area_struct *vma; in mmf_recalc_uprobes() local
1994 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mmf_recalc_uprobes()
1995 if (!valid_vma(vma, false)) in mmf_recalc_uprobes()
2003 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) in mmf_recalc_uprobes()
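
MMF_HAS_UPROBES is set eagerly but cleared lazily: uprobe_munmap() above only marks the mm with MMF_RECALC_UPROBES when a probed range goes away, and mmf_recalc_uprobes() later rescans the whole vma list, clearing MMF_HAS_UPROBES only if no remaining vma holds a probe. A condensed sketch (called with the mmap lock held; the scan returns early the moment one probed vma is found):

        static void mmf_recalc_uprobes_sketch(struct mm_struct *mm)
        {
                struct vm_area_struct *vma;

                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        if (!valid_vma(vma, false))
                                continue;
                        /* One remaining probed vma is enough to keep the flag. */
                        if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
                                return;
                }

                clear_bit(MMF_HAS_UPROBES, &mm->flags);
        }
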
2048 struct vm_area_struct *vma; in find_active_uprobe() local
2051 vma = find_vma(mm, bp_vaddr); in find_active_uprobe()
2052 if (vma && vma->vm_start <= bp_vaddr) { in find_active_uprobe()
2053 if (valid_vma(vma, false)) { in find_active_uprobe()
2054 struct inode *inode = file_inode(vma->vm_file); in find_active_uprobe()
2055 loff_t offset = vaddr_to_offset(vma, bp_vaddr); in find_active_uprobe()
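
On a breakpoint trap, find_active_uprobe() maps the faulting address back to an inode:offset key and looks the uprobe up: find_vma() plus the vm_start check on line 2052 confirm bp_vaddr actually lies inside a vma (find_vma() may return the next vma above the address), and vaddr_to_offset() recovers the file offset. A condensed sketch, with the is_swbp fallback for stale breakpoints elided:

        static struct uprobe *find_active_uprobe_sketch(unsigned long bp_vaddr)
        {
                struct mm_struct *mm = current->mm;
                struct uprobe *uprobe = NULL;
                struct vm_area_struct *vma;

                mmap_read_lock(mm);
                vma = find_vma(mm, bp_vaddr);
                /* find_vma() only bounds vm_end; confirm bp_vaddr is inside. */
                if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
                        struct inode *inode = file_inode(vma->vm_file);
                        loff_t offset = vaddr_to_offset(vma, bp_vaddr);

                        uprobe = find_uprobe(inode, offset);
                }
                mmap_read_unlock(mm);
                return uprobe;
        }
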