Lines matching refs:vma, i.e. identifier references to vma in the Linux kernel's mm/nommu.c (the !MMU memory-management code; the vm_area_cachep, vmacache, and four-argument kernel_read() usage suggest a tree of roughly the v4.14 era). Each entry gives the source line number, the matched line, the containing function, and whether vma is a local or an argument there.

99 struct vm_area_struct *vma; in kobjsize() local
101 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
102 if (vma) in kobjsize()
103 return vma->vm_end - vma->vm_start; in kobjsize()
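
The matches at 99-103 are the tail of kobjsize(): on !MMU, a pointer that is neither slab nor compound-page memory is sized by the VMA that contains it. Reconstructed around the matched lines (an approximation of the v4.x source, not a verbatim excerpt):

unsigned int kobjsize(const void *objp)
{
        struct page *page;

        /* null or invalid pointers size to zero */
        if (!objp || !virt_addr_valid(objp))
                return 0;

        page = virt_to_head_page(objp);

        /* slab objects know their own size */
        if (PageSlab(page))
                return ksize(objp);

        /* for a plain mapped page, report the extent of the containing VMA */
        if (!PageCompound(page)) {
                struct vm_area_struct *vma;

                vma = find_vma(current->mm, (unsigned long)objp);
                if (vma)
                        return vma->vm_end - vma->vm_start;
        }

        /* otherwise fall back to the size of the underlying allocation */
        return PAGE_SIZE << compound_order(page);
}
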
118 struct vm_area_struct *vma; in __get_user_pages() local
131 vma = find_vma(mm, start); in __get_user_pages()
132 if (!vma) in __get_user_pages()
136 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) || in __get_user_pages()
137 !(vm_flags & vma->vm_flags)) in __get_user_pages()
146 vmas[i] = vma; in __get_user_pages()
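
Lines 118-146 are from the !MMU __get_user_pages(). There is nothing to fault in, so it walks the range VMA by VMA, refusing IO/PFN mappings and permission mismatches. A sketch consistent with the matched lines (the exact signature and FOLL-flag handling are reconstructed from memory and may differ in detail):

static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                             unsigned long start, unsigned long nr_pages,
                             unsigned int foll_flags, struct page **pages,
                             struct vm_area_struct **vmas, int *nonblocking)
{
        struct vm_area_struct *vma;
        unsigned long vm_flags;
        int i;

        /* work out the permissions the caller needs; FOLL_FORCE relaxes
         * the check to the VM_MAY* bits */
        vm_flags  = (foll_flags & FOLL_WRITE) ?
                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= (foll_flags & FOLL_FORCE) ?
                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < nr_pages; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        goto finish_or_fault;

                /* protect what we can, including chardevs */
                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
                    !(vm_flags & vma->vm_flags))
                        goto finish_or_fault;

                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
                                get_page(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
                start = (start + PAGE_SIZE) & PAGE_MASK;
        }

        return i;

finish_or_fault:
        return i ? : -EFAULT;
}
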
211 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
214 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
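
follow_pfn() (lines 211-214) is nearly trivial here: with no page tables, virtual and physical addresses coincide, so the PFN is just the address shifted down. The full function plausibly reads:

int follow_pfn(struct vm_area_struct *vma, unsigned long address,
               unsigned long *pfn)
{
        /* only meaningful for IO and raw-PFN mappings */
        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                return -EINVAL;

        *pfn = address >> PAGE_SHIFT;
        return 0;
}
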
251 struct vm_area_struct *vma; in vmalloc_user() local
254 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
255 if (vma) in vmalloc_user()
256 vma->vm_flags |= VM_USERMAP; in vmalloc_user()
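
vmalloc_user() (lines 251-256) tags the VMA backing the fresh allocation with VM_USERMAP so that remap_vmalloc_range() will later accept it. Roughly (v4.x era, mmap_sem rather than the later mmap_lock):

void *vmalloc_user(unsigned long size)
{
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {
                struct vm_area_struct *vma;

                down_write(&current->mm->mmap_sem);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vma->vm_flags |= VM_USERMAP;
                up_write(&current->mm->mmap_sem);
        }

        return ret;
}
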
489 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
672 static void protect_vma(struct vm_area_struct *vma, unsigned long flags) in protect_vma() argument
675 struct mm_struct *mm = vma->vm_mm; in protect_vma()
676 long start = vma->vm_start & PAGE_MASK; in protect_vma()
677 while (start < vma->vm_end) { in protect_vma()
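
vm_insert_page() (line 489) is a stub on !MMU, and protect_vma() (lines 672-677) only does real work when the hardware has an MPU (CONFIG_MPU, the Blackfin case), applying protections a page at a time. Sketches consistent with the matched lines; protect_page() and update_protections() are arch-provided helpers:

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                   struct page *page)
{
        return -EINVAL;         /* no page tables to insert into */
}

#ifdef CONFIG_MPU
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
        struct mm_struct *mm = vma->vm_mm;
        long start = vma->vm_start & PAGE_MASK;

        /* apply the protection bits to every page, then reload the MPU */
        while (start < vma->vm_end) {
                protect_page(mm, start, flags);
                start += PAGE_SIZE;
        }
        update_protections(mm);
}
#else
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
}
#endif
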
691 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
697 BUG_ON(!vma->vm_region); in add_vma_to_mm()
700 vma->vm_mm = mm; in add_vma_to_mm()
702 protect_vma(vma, vma->vm_flags); in add_vma_to_mm()
705 if (vma->vm_file) { in add_vma_to_mm()
706 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
710 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
724 if (vma->vm_start < pvma->vm_start) in add_vma_to_mm()
726 else if (vma->vm_start > pvma->vm_start) { in add_vma_to_mm()
729 } else if (vma->vm_end < pvma->vm_end) in add_vma_to_mm()
731 else if (vma->vm_end > pvma->vm_end) { in add_vma_to_mm()
734 } else if (vma < pvma) in add_vma_to_mm()
736 else if (vma > pvma) { in add_vma_to_mm()
743 rb_link_node(&vma->vm_rb, parent, p); in add_vma_to_mm()
744 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
751 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
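
Lines 691-751 are add_vma_to_mm(). Every mm keeps its VMAs in both an rbtree and a sorted linked list, and the matched comparisons show the three-level sort key: start address, then end address, then the VMA pointer itself, so that byte-identical VMAs (which can occur here) still order deterministically. Reconstructed:

static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *pvma, *prev;
        struct address_space *mapping;
        struct rb_node **p, *parent, *rb_prev;

        BUG_ON(!vma->vm_region);

        mm->map_count++;
        vma->vm_mm = mm;

        protect_vma(vma, vma->vm_flags);

        /* add the VMA to the file mapping's interval tree */
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }

        /* add the VMA to the tree: sort by start addr, then end addr,
         * then VMA struct addr */
        parent = rb_prev = NULL;
        p = &mm->mm_rb.rb_node;
        while (*p) {
                parent = *p;
                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

                if (vma->vm_start < pvma->vm_start)
                        p = &(*p)->rb_left;
                else if (vma->vm_start > pvma->vm_start) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else if (vma->vm_end < pvma->vm_end)
                        p = &(*p)->rb_left;
                else if (vma->vm_end > pvma->vm_end) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else if (vma < pvma)
                        p = &(*p)->rb_left;
                else if (vma > pvma) {
                        rb_prev = parent;
                        p = &(*p)->rb_right;
                } else
                        BUG();
        }

        rb_link_node(&vma->vm_rb, parent, p);
        rb_insert_color(&vma->vm_rb, &mm->mm_rb);

        /* ...and to the list, threaded in after its tree predecessor */
        prev = NULL;
        if (rb_prev)
                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

        __vma_link_list(mm, vma, prev, parent);
}
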
757 static void delete_vma_from_mm(struct vm_area_struct *vma) in delete_vma_from_mm() argument
761 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
764 protect_vma(vma, 0); in delete_vma_from_mm()
769 if (curr->vmacache.vmas[i] == vma) { in delete_vma_from_mm()
776 if (vma->vm_file) { in delete_vma_from_mm()
777 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
781 vma_interval_tree_remove(vma, &mapping->i_mmap); in delete_vma_from_mm()
787 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
789 if (vma->vm_prev) in delete_vma_from_mm()
790 vma->vm_prev->vm_next = vma->vm_next; in delete_vma_from_mm()
792 mm->mmap = vma->vm_next; in delete_vma_from_mm()
794 if (vma->vm_next) in delete_vma_from_mm()
795 vma->vm_next->vm_prev = vma->vm_prev; in delete_vma_from_mm()
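
delete_vma_from_mm() (lines 757-795) is the inverse: drop the VMA from the per-task vmacache, the file's interval tree, the rbtree, and the doubly linked list. Approximately:

static void delete_vma_from_mm(struct vm_area_struct *vma)
{
        int i;
        struct address_space *mapping;
        struct mm_struct *mm = vma->vm_mm;
        struct task_struct *curr = current;

        protect_vma(vma, 0);

        mm->map_count--;
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the VMA is cached, invalidate the whole cache */
                if (curr->vmacache.vmas[i] == vma) {
                        vmacache_invalidate(mm);
                        break;
                }
        }

        /* remove the VMA from the mapping's interval tree */
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;

                i_mmap_lock_write(mapping);
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
                i_mmap_unlock_write(mapping);
        }

        /* remove from the MM's tree and list */
        rb_erase(&vma->vm_rb, &mm->mm_rb);

        if (vma->vm_prev)
                vma->vm_prev->vm_next = vma->vm_next;
        else
                mm->mmap = vma->vm_next;

        if (vma->vm_next)
                vma->vm_next->vm_prev = vma->vm_prev;
}
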
801 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
803 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
804 vma->vm_ops->close(vma); in delete_vma()
805 if (vma->vm_file) in delete_vma()
806 fput(vma->vm_file); in delete_vma()
807 put_nommu_region(vma->vm_region); in delete_vma()
808 kmem_cache_free(vm_area_cachep, vma); in delete_vma()
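
delete_vma() (lines 801-808) then destroys the object itself in a fixed four-step teardown. Note that the region, not the VMA, owns the backing pages; put_nommu_region() frees them only when the last sharer goes away:

static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);        /* driver callback first */
        if (vma->vm_file)
                fput(vma->vm_file);             /* drop the file reference */
        put_nommu_region(vma->vm_region);       /* maybe free the region */
        kmem_cache_free(vm_area_cachep, vma);   /* free the VMA itself */
}
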
817 struct vm_area_struct *vma; in find_vma() local
820 vma = vmacache_find(mm, addr); in find_vma()
821 if (likely(vma)) in find_vma()
822 return vma; in find_vma()
826 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
827 if (vma->vm_start > addr) in find_vma()
829 if (vma->vm_end > addr) { in find_vma()
830 vmacache_update(addr, vma); in find_vma()
831 return vma; in find_vma()
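
find_vma() (lines 817-831) is the workhorse behind several of the callers above: try the per-task vmacache, then walk the sorted list, stopping early once vm_start passes the target. Assembled from the matched lines:

struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        /* check the per-task cache first */
        vma = vmacache_find(mm, addr);
        if (likely(vma))
                return vma;

        /* trawl the list, which is sorted by vm_start, so give up as
         * soon as we pass addr */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > addr)
                        return NULL;
                if (vma->vm_end > addr) {
                        vmacache_update(addr, vma);
                        return vma;
                }
        }

        return NULL;
}
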
852 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
865 struct vm_area_struct *vma; in find_vma_exact() local
869 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
870 if (vma) in find_vma_exact()
871 return vma; in find_vma_exact()
875 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
876 if (vma->vm_start < addr) in find_vma_exact()
878 if (vma->vm_start > addr) in find_vma_exact()
880 if (vma->vm_end == end) { in find_vma_exact()
881 vmacache_update(addr, vma); in find_vma_exact()
882 return vma; in find_vma_exact()
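
expand_stack() (line 852) is another !MMU stub: without an MMU there is no way to grow a mapping in place, so it simply fails. find_vma_exact() (lines 865-882) mirrors find_vma() but insists on an exact start and end:

int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
        return -ENOMEM;         /* stacks can never be grown on !MMU */
}

static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                             unsigned long addr,
                                             unsigned long len)
{
        struct vm_area_struct *vma;
        unsigned long end = addr + len;

        /* check the cache first */
        vma = vmacache_find_exact(mm, addr, end);
        if (vma)
                return vma;

        /* trawl the sorted list for an exact [addr, end) match */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start < addr)
                        continue;
                if (vma->vm_start > addr)
                        return NULL;
                if (vma->vm_end == end) {
                        vmacache_update(addr, vma);
                        return vma;
                }
        }

        return NULL;
}
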
1092 static int do_mmap_shared_file(struct vm_area_struct *vma) in do_mmap_shared_file() argument
1096 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
1098 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
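
do_mmap_shared_file() (lines 1092-1098) delegates a MAP_SHARED file mapping entirely to the driver or filesystem. An -ENOSYS from ->mmap means direct mapping is impossible, and since a shared mapping cannot be emulated with a private copy, the call fails outright:

static int do_mmap_shared_file(struct vm_area_struct *vma)
{
        int ret;

        ret = call_mmap(vma->vm_file, vma);
        if (ret == 0) {
                vma->vm_region->vm_top = vma->vm_region->vm_end;
                return 0;
        }
        if (ret != -ENOSYS)
                return ret;

        /* -ENOSYS means direct mapping isn't possible at all, and a
         * shared mapping can't be faked with a private copy */
        return -ENODEV;
}
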
1113 static int do_mmap_private(struct vm_area_struct *vma, in do_mmap_private() argument
1127 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
1130 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
1131 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
1161 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1166 vma->vm_start = region->vm_start; in do_mmap_private()
1167 vma->vm_end = region->vm_start + len; in do_mmap_private()
1169 if (vma->vm_file) { in do_mmap_private()
1173 fpos = vma->vm_pgoff; in do_mmap_private()
1176 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1190 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1191 region->vm_end = vma->vm_end = 0; in do_mmap_private()
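
do_mmap_private() (lines 1113-1191) first offers the driver a chance to map directly; failing that, it allocates physically contiguous pages, marks the mapping VM_MAPPED_COPY, and reads the file contents into the copy with kernel_read(). A condensed sketch (the page-trimming logic and error reporting of the real function are elided):

static int do_mmap_private(struct vm_area_struct *vma,
                           struct vm_region *region,
                           unsigned long len,
                           unsigned long capabilities)
{
        unsigned long total;
        void *base;
        int ret, order;

        /* give the driver a chance to map the object directly */
        if (capabilities & NOMMU_MAP_DIRECT) {
                ret = call_mmap(vma->vm_file, vma);
                if (ret == 0) {
                        /* success here implies sharing */
                        BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
                        vma->vm_region->vm_top = vma->vm_region->vm_end;
                        return 0;
                }
                if (ret != -ENOSYS)
                        return ret;
                /* -ENOSYS: fall through and make a private copy */
        }

        /* allocate contiguous memory to hold the private copy */
        order = get_order(len);
        total = 1 << order;
        /* ... excess pages beyond len may be trimmed here ... */

        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
                return -ENOMEM;

        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
        region->vm_start = (unsigned long) base;
        region->vm_end   = region->vm_start + len;
        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

        vma->vm_start = region->vm_start;
        vma->vm_end   = region->vm_start + len;

        if (vma->vm_file) {
                /* read the contents of the file into the copy */
                loff_t fpos = (loff_t)vma->vm_pgoff << PAGE_SHIFT;

                ret = kernel_read(vma->vm_file, base, len, &fpos);
                if (ret < 0)
                        goto error_free;
                if (ret < len)  /* zero the tail the file didn't cover */
                        memset(base + ret, 0, len - ret);
        }

        return 0;

error_free:
        free_pages_exact(base, total << PAGE_SHIFT);
        region->vm_start = vma->vm_start = 0;
        region->vm_end = vma->vm_end = 0;
        return ret;
}
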
1215 struct vm_area_struct *vma; in do_mmap() local
1243 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in do_mmap()
1244 if (!vma) in do_mmap()
1251 INIT_LIST_HEAD(&vma->anon_vma_chain); in do_mmap()
1252 vma->vm_flags = vm_flags; in do_mmap()
1253 vma->vm_pgoff = pgoff; in do_mmap()
1257 vma->vm_file = get_file(file); in do_mmap()
1309 vma->vm_region = pregion; in do_mmap()
1312 vma->vm_start = start; in do_mmap()
1313 vma->vm_end = start + len; in do_mmap()
1316 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap()
1318 ret = do_mmap_shared_file(vma); in do_mmap()
1320 vma->vm_region = NULL; in do_mmap()
1321 vma->vm_start = 0; in do_mmap()
1322 vma->vm_end = 0; in do_mmap()
1356 vma->vm_start = region->vm_start = addr; in do_mmap()
1357 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1362 vma->vm_region = region; in do_mmap()
1367 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1368 ret = do_mmap_shared_file(vma); in do_mmap()
1370 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1376 if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) in do_mmap()
1381 result = vma->vm_start; in do_mmap()
1386 add_vma_to_mm(current->mm, vma); in do_mmap()
1390 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1405 if (vma->vm_file) in do_mmap()
1406 fput(vma->vm_file); in do_mmap()
1407 kmem_cache_free(vm_area_cachep, vma); in do_mmap()
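
The do_mmap() matches (lines 1215-1407) trace the whole !MMU mapping path: allocate a region and a VMA, try to reuse an existing region for shared file mappings, otherwise map directly or fall back to a private copy, then link the VMA into the mm and flush the icache for executable regions. A heavily condensed sketch; validate_mmap_request(), determine_vm_flags(), the nommu_region_sem locking, and the region-sharing search over the region tree are real parts of the function that are reduced to comments here:

unsigned long do_mmap(struct file *file, unsigned long addr,
                      unsigned long len, unsigned long prot,
                      unsigned long flags, vm_flags_t vm_flags,
                      unsigned long pgoff, unsigned long *populate,
                      struct list_head *uf)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        unsigned long capabilities, result;
        int ret = -ENOMEM;

        *populate = 0;

        /* check the request and work out what the backing object
         * supports (direct sharing vs. private copy) */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
        if (ret < 0)
                return ret;

        /* the address hint is ignored on !MMU */
        addr = 0;
        len = PAGE_ALIGN(len);

        vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

        /* each mapping needs a per-process VMA plus a system-wide region
         * that tracks the backing storage */
        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                return -ENOMEM;
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                kmem_cache_free(vm_region_jar, region);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&vma->anon_vma_chain);
        vma->vm_flags = vm_flags;
        vma->vm_pgoff = pgoff;
        if (file) {
                region->vm_file = get_file(file);
                vma->vm_file = get_file(file);
        }

        /* a MAP_SHARED file mapping may be able to reuse a region another
         * process has already set up for the same object: on a hit the
         * code sets vma->vm_region/vm_start/vm_end from that region (plus
         * VM_MAPPED_COPY if it is a copy) and, if it isn't a copy, calls
         * do_mmap_shared_file(); on failure it zeroes those fields again
         * and falls through (search elided here) */

        /* otherwise the fresh region backs the mapping */
        vma->vm_region = region;

        if (file && vma->vm_flags & VM_SHARED)
                ret = do_mmap_shared_file(vma);
        else
                ret = do_mmap_private(vma, region, len, capabilities);
        if (ret < 0)
                goto error;

        /* new anonymous private memory must be zeroed */
        if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
                memset((void *)region->vm_start, 0,
                       region->vm_end - region->vm_start);

        result = vma->vm_start;
        add_vma_to_mm(current->mm, vma);

        /* executable regions get their icache flushed exactly once */
        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
                flush_icache_range(region->vm_start, region->vm_end);
                region->vm_icache_flushed = true;
        }

        return result;

error:
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
        kmem_cache_free(vm_area_cachep, vma);
        return ret;
}
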
1482 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1491 if (vma->vm_file) in split_vma()
1508 *new = *vma; in split_vma()
1509 *region = *vma->vm_region; in split_vma()
1512 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1524 delete_vma_from_mm(vma); in split_vma()
1526 delete_nommu_region(vma->vm_region); in split_vma()
1528 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1529 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1531 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1532 vma->vm_region->vm_top = addr; in split_vma()
1534 add_nommu_region(vma->vm_region); in split_vma()
1537 add_vma_to_mm(mm, vma); in split_vma()
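
split_vma() (lines 1482-1537) can only split anonymous mappings, because a file-backed region may be shared with other processes. It clones both the VMA and its region, trims the pair on either side of addr, and rehooks everything so the trees stay sorted. Reconstructed:

int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
              unsigned long addr, int new_below)
{
        struct vm_area_struct *new;
        struct vm_region *region;
        unsigned long npages;

        /* only anonymous regions may be split */
        if (vma->vm_file)
                return -ENOMEM;

        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;

        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new) {
                kmem_cache_free(vm_region_jar, region);
                return -ENOMEM;
        }

        /* most fields are the same: copy both objects, then fix up */
        *new = *vma;
        *region = *vma->vm_region;
        new->vm_region = region;

        npages = (addr - vma->vm_start) >> PAGE_SHIFT;

        if (new_below) {
                region->vm_top = region->vm_end = new->vm_end = addr;
        } else {
                region->vm_start = new->vm_start = addr;
                region->vm_pgoff = new->vm_pgoff += npages;
        }

        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);

        delete_vma_from_mm(vma);
        down_write(&nommu_region_sem);
        delete_nommu_region(vma->vm_region);
        if (new_below) {
                vma->vm_region->vm_start = vma->vm_start = addr;
                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
        } else {
                vma->vm_region->vm_end = vma->vm_end = addr;
                vma->vm_region->vm_top = addr;
        }
        add_nommu_region(vma->vm_region);
        add_nommu_region(new->vm_region);
        up_write(&nommu_region_sem);
        add_vma_to_mm(mm, vma);
        add_vma_to_mm(mm, new);
        return 0;
}
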
1547 struct vm_area_struct *vma, in shrink_vma() argument
1554 delete_vma_from_mm(vma); in shrink_vma()
1555 if (from > vma->vm_start) in shrink_vma()
1556 vma->vm_end = from; in shrink_vma()
1558 vma->vm_start = to; in shrink_vma()
1559 add_vma_to_mm(mm, vma); in shrink_vma()
1562 region = vma->vm_region; in shrink_vma()
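
shrink_vma() (lines 1547-1562) cuts a chunk off either end of an anonymous VMA and frees the corresponding pages of its (single-user) region. Approximately:

static int shrink_vma(struct mm_struct *mm,
                      struct vm_area_struct *vma,
                      unsigned long from, unsigned long to)
{
        struct vm_region *region;

        /* adjust the VMA's pointers; unhooking and rehooking keeps the
         * mm's tree and list correctly sorted */
        delete_vma_from_mm(vma);
        if (from > vma->vm_start)
                vma->vm_end = from;     /* trimming the tail */
        else
                vma->vm_start = to;     /* trimming the head */
        add_vma_to_mm(mm, vma);

        /* cut the backing region down to size; an anonymous region has
         * exactly one user at this point */
        region = vma->vm_region;
        BUG_ON(region->vm_usage != 1);

        down_write(&nommu_region_sem);
        delete_nommu_region(region);
        if (from > region->vm_start) {
                to = region->vm_top;    /* free the allocation slack too */
                region->vm_top = region->vm_end = from;
        } else {
                region->vm_start = to;
        }
        add_nommu_region(region);
        up_write(&nommu_region_sem);

        free_page_series(from, to);
        return 0;
}
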
1587 struct vm_area_struct *vma; in do_munmap() local
1598 vma = find_vma(mm, start); in do_munmap()
1599 if (!vma) { in do_munmap()
1611 if (vma->vm_file) { in do_munmap()
1613 if (start > vma->vm_start) in do_munmap()
1615 if (end == vma->vm_end) in do_munmap()
1617 vma = vma->vm_next; in do_munmap()
1618 } while (vma); in do_munmap()
1622 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1624 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1628 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1630 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1631 ret = split_vma(mm, vma, start, 1); in do_munmap()
1635 return shrink_vma(mm, vma, start, end); in do_munmap()
1639 delete_vma_from_mm(vma); in do_munmap()
1640 delete_vma(mm, vma); in do_munmap()
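
do_munmap() (lines 1587-1640) shows the !MMU constraints on unmapping: file-backed VMAs can only be unmapped whole, while anonymous VMAs may be split at most once and then shrunk. Condensed around the matched lines (rate-limited warnings elided):

int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
              struct list_head *uf)
{
        struct vm_area_struct *vma;
        unsigned long end;
        int ret;

        len = PAGE_ALIGN(len);
        if (len == 0)
                return -EINVAL;

        end = start + len;

        /* find the first potentially overlapping VMA */
        vma = find_vma(mm, start);
        if (!vma)
                return -EINVAL;

        /* a file-backed VMA must be unmapped whole ... */
        if (vma->vm_file) {
                do {
                        if (start > vma->vm_start)
                                return -EINVAL;
                        if (end == vma->vm_end)
                                goto erase_whole_vma;
                        vma = vma->vm_next;
                } while (vma);
                return -EINVAL;
        }

        /* ... while an anonymous VMA may be split once, then shrunk */
        if (start == vma->vm_start && end == vma->vm_end)
                goto erase_whole_vma;
        if (start < vma->vm_start || end > vma->vm_end)
                return -EINVAL;
        if (offset_in_page(start))
                return -EINVAL;
        if (end != vma->vm_end && offset_in_page(end))
                return -EINVAL;
        if (start != vma->vm_start && end != vma->vm_end) {
                ret = split_vma(mm, vma, start, 1);
                if (ret < 0)
                        return ret;
        }
        return shrink_vma(mm, vma, start, end);

erase_whole_vma:
        delete_vma_from_mm(vma);
        delete_vma(mm, vma);
        return 0;
}
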
1667 struct vm_area_struct *vma; in exit_mmap() local
1674 while ((vma = mm->mmap)) { in exit_mmap()
1675 mm->mmap = vma->vm_next; in exit_mmap()
1676 delete_vma_from_mm(vma); in exit_mmap()
1677 delete_vma(mm, vma); in exit_mmap()
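
exit_mmap() (lines 1667-1677) tears down an entire address space by popping VMAs off the head of the list. Approximately:

void exit_mmap(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        if (!mm)
                return;

        mm->total_vm = 0;

        /* pop VMAs off the head of the list until none remain */
        while ((vma = mm->mmap)) {
                mm->mmap = vma->vm_next;
                delete_vma_from_mm(vma);
                delete_vma(mm, vma);
                cond_resched();
        }
}
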
1701 struct vm_area_struct *vma; in do_mremap() local
1715 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1716 if (!vma) in do_mremap()
1719 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1722 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1725 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1729 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1730 return vma->vm_start; in do_mremap()
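
do_mremap() (lines 1701-1730) cannot move a mapping (there is nothing to remap without an MMU, and user pointers into the old mapping could not be fixed up); it can only resize in place, within the slack the backing region already owns:

static unsigned long do_mremap(unsigned long addr,
                               unsigned long old_len, unsigned long new_len,
                               unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;

        /* insanity checks first */
        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);
        if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;

        if (offset_in_page(addr))
                return -EINVAL;

        if (flags & MREMAP_FIXED && new_addr != addr)
                return (unsigned long) -EINVAL;

        vma = find_vma_exact(current->mm, addr, old_len);
        if (!vma)
                return (unsigned long) -EINVAL;

        if (vma->vm_end != vma->vm_start + old_len)
                return (unsigned long) -EFAULT;

        if (vma->vm_flags & VM_MAYSHARE)
                return (unsigned long) -EPERM;

        /* the new size must fit inside the existing backing region */
        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
                return (unsigned long) -ENOMEM;

        /* all checks complete - do it */
        vma->vm_end = vma->vm_start + new_len;
        return vma->vm_start;
}
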
1745 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
1753 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1759 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1764 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1767 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1769 pfn += vma->vm_pgoff; in vm_iomap_memory()
1770 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
1774 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
1777 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1779 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1782 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1783 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
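
The remaining remap helpers (lines 1745-1783) are all short on !MMU: follow_page_mask() has no page tables to walk, remap_pfn_range() only sanity-checks an identity mapping and sets flags, vm_iomap_memory() feeds io_remap_pfn_range(), and remap_vmalloc_range() simply repoints the VMA at the vmalloc buffer (which is why vmalloc_user() had to set VM_USERMAP). Sketches:

struct page *follow_page_mask(struct vm_area_struct *vma,
                              unsigned long address, unsigned int flags,
                              unsigned int *page_mask)
{
        *page_mask = 0;
        return NULL;            /* no page tables to follow */
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                    unsigned long pfn, unsigned long size, pgprot_t prot)
{
        /* only an identity mapping makes sense without an MMU */
        if (addr != (pfn << PAGE_SHIFT))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
}

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start,
                    unsigned long len)
{
        unsigned long pfn = start >> PAGE_SHIFT;
        unsigned long vm_len = vma->vm_end - vma->vm_start;

        pfn += vma->vm_pgoff;
        return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len,
                                  vma->vm_page_prot);
}

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        unsigned int size = vma->vm_end - vma->vm_start;

        if (!(vma->vm_flags & VM_USERMAP))
                return -EINVAL;

        /* no copying: point the VMA straight at the vmalloc buffer */
        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
        vma->vm_end = vma->vm_start + size;

        return 0;
}
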
1819 struct vm_area_struct *vma; in __access_remote_vm() local
1825 vma = find_vma(mm, addr); in __access_remote_vm()
1826 if (vma) { in __access_remote_vm()
1828 if (addr + len >= vma->vm_end) in __access_remote_vm()
1829 len = vma->vm_end - addr; in __access_remote_vm()
1832 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1833 copy_to_user_page(vma, NULL, addr, in __access_remote_vm()
1835 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1836 copy_from_user_page(vma, NULL, addr, in __access_remote_vm()
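
__access_remote_vm() (lines 1819-1836) backs ptrace-style access to another process's memory. Since the "remote" memory is directly addressable, it clamps the length to the containing VMA and copies with copy_to_user_page()/copy_from_user_page(), gated on the VM_MAYWRITE/VM_MAYREAD bits:

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
                              unsigned long addr, void *buf, int len,
                              unsigned int gup_flags)
{
        struct vm_area_struct *vma;
        int write = gup_flags & FOLL_WRITE;

        down_read(&mm->mmap_sem);

        /* the access must start within one of the target's mappings */
        vma = find_vma(mm, addr);
        if (vma) {
                /* don't overrun this mapping */
                if (addr + len >= vma->vm_end)
                        len = vma->vm_end - addr;

                /* only copy where it is permitted */
                if (write && vma->vm_flags & VM_MAYWRITE)
                        copy_to_user_page(vma, NULL, addr,
                                          (void *)addr, buf, len);
                else if (!write && vma->vm_flags & VM_MAYREAD)
                        copy_from_user_page(vma, NULL, addr,
                                            buf, (void *)addr, len);
                else
                        len = 0;
        } else {
                len = 0;
        }

        up_read(&mm->mmap_sem);
        return len;
}
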
1902 struct vm_area_struct *vma; in nommu_shrink_inode_mappings() local
1914 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1917 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1930 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1931 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1934 region = vma->vm_region; in nommu_shrink_inode_mappings()
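
Finally, nommu_shrink_inode_mappings() (lines 1902-1934) is the truncate() helper. Shrinking a file out from under a shared mapping would change data another process can see, so any VM_SHARED mapping overlapping the dead zone aborts the truncation; surviving shared regions merely have their allocation slack trimmed. Condensed:

int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
                                size_t newsize)
{
        struct vm_area_struct *vma;
        struct vm_region *region;
        pgoff_t low, high;
        size_t r_size, r_top;

        /* the page range affected by the truncation */
        low = newsize >> PAGE_SHIFT;
        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        down_write(&nommu_region_sem);
        i_mmap_lock_read(inode->i_mapping);

        /* refuse if a shared mapping overlaps the dead zone */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
                if (vma->vm_flags & VM_SHARED) {
                        i_mmap_unlock_read(inode->i_mapping);
                        up_write(&nommu_region_sem);
                        return -ETXTBSY; /* not quite true, but near enough */
                }
        }

        /* trim the slack of shared regions extending past the new EOF */
        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
                if (!(vma->vm_flags & VM_SHARED))
                        continue;

                region = vma->vm_region;
                r_size = region->vm_top - region->vm_start;
                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

                if (r_top > newsize) {
                        region->vm_top -= r_top - newsize;
                        if (region->vm_end > region->vm_top)
                                region->vm_end = region->vm_top;
                }
        }

        i_mmap_unlock_read(inode->i_mapping);
        up_write(&nommu_region_sem);
        return 0;
}
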