/mm/

mincore.c
      51  static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)    in mincore_page() argument
      64  page = find_get_entry(mapping, pgoff);    in mincore_page()
      83  page = find_get_page(mapping, pgoff);    in mincore_page()
      85  page = find_get_page(mapping, pgoff);    in mincore_page()
     102  pgoff_t pgoff;    in __mincore_unmapped_range() local
     104  pgoff = linear_page_index(vma, addr);    in __mincore_unmapped_range()
     105  for (i = 0; i < nr; i++, pgoff++)    in __mincore_unmapped_range()
     106  vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);    in __mincore_unmapped_range()

mmap.c
     720  unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,    in __vma_adjust() argument
     867  vma->vm_pgoff = pgoff;    in __vma_adjust()
    1146  pgoff_t pgoff, struct mempolicy *policy,    in vma_merge() argument
    1180  anon_vma, file, pgoff,    in vma_merge()
    1190  pgoff+pglen,    in vma_merge()
    1214  anon_vma, file, pgoff+pglen,    in vma_merge()
    1385  unsigned long pgoff, unsigned long len)    in file_mmap_ok() argument
    1392  if (pgoff > maxsize >> PAGE_SHIFT)    in file_mmap_ok()
    1403  unsigned long pgoff, unsigned long *populate,    in do_mmap() argument
    1437  if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)    in do_mmap()
    [all …]

nommu.c
     785  unsigned long pgoff,    in validate_mmap_request() argument
     808  if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)    in validate_mmap_request()
    1100  unsigned long pgoff,    in do_mmap() argument
    1114  ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,    in do_mmap()
    1138  region->vm_pgoff = pgoff;    in do_mmap()
    1141  vma->vm_pgoff = pgoff;    in do_mmap()
    1163  pgend = pgoff + pglen;    in do_mmap()
    1182  if (pgoff >= rpgend)    in do_mmap()
    1187  if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&    in do_mmap()
    1188  !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {    in do_mmap()
    [all …]

mremap.c
     438  unsigned long pgoff;    in vma_to_resize() local
     467  pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;    in vma_to_resize()
     468  pgoff += vma->vm_pgoff;    in vma_to_resize()
     469  if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)    in vma_to_resize()

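The mremap.c, mmap.c, and nommu.c hits above all follow the same idiom: derive the file page offset of an address by adding the address's page distance from vm_start to vma->vm_pgoff, and reject the request if pgoff plus the length in pages would wrap around. Below is a minimal userspace sketch of that arithmetic; struct vma_model, MODEL_PAGE_SHIFT, and the helper names are illustrative stand-ins, not the kernel's definitions.

/*
 * Simplified model of the pgoff arithmetic seen in vma_to_resize() and
 * do_mmap() above. All names here are stand-ins for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12UL            /* assume 4 KiB pages */

struct vma_model {
	unsigned long vm_start;          /* first mapped virtual address */
	unsigned long vm_pgoff;          /* file offset of vm_start, in pages */
};

/* File page offset backing virtual address addr inside this mapping. */
static unsigned long addr_to_pgoff(const struct vma_model *vma, unsigned long addr)
{
	return vma->vm_pgoff + ((addr - vma->vm_start) >> MODEL_PAGE_SHIFT);
}

/* Mirror of the wrap check: true if pgoff + length-in-pages overflows. */
static bool pgoff_overflows(unsigned long pgoff, unsigned long len)
{
	return pgoff + (len >> MODEL_PAGE_SHIFT) < pgoff;
}

int main(void)
{
	struct vma_model vma = { .vm_start = 0x7f0000000000UL, .vm_pgoff = 16 };
	unsigned long addr = vma.vm_start + 3 * (1UL << MODEL_PAGE_SHIFT);

	printf("pgoff of addr: %lu\n", addr_to_pgoff(&vma, addr));   /* 19 */
	printf("wraps: %d\n", pgoff_overflows(~0UL, 2UL << MODEL_PAGE_SHIFT));
	return 0;
}
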
memory-failure.c
     444  pgoff_t pgoff;    in collect_procs_anon() local
     450  pgoff = page_to_pgoff(page);    in collect_procs_anon()
     459  pgoff, pgoff) {    in collect_procs_anon()
     484  pgoff_t pgoff = page_to_pgoff(page);    in collect_procs_file() local
     489  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,    in collect_procs_file()
     490  pgoff) {    in collect_procs_file()

mprotect.c
     346  pgoff_t pgoff;    in mprotect_fixup() local
     394  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);    in mprotect_fixup()
     396  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),    in mprotect_fixup()

khugepaged.c
     902  .pgoff = linear_page_index(vma, address),    in __collapse_huge_page_swapin()
    1417  static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)    in retract_page_tables() argument
    1424  vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {    in retract_page_tables()
    1443  addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);    in retract_page_tables()
    1972  pgoff_t pgoff = linear_page_index(vma,    in khugepaged_scan_mm_slot() local
    1981  khugepaged_scan_file(mm, file, pgoff, hpage);    in khugepaged_scan_mm_slot()

mlock.c
     523  pgoff_t pgoff;    in mlock_fixup() local
     535  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);    in mlock_fixup()
     537  vma->vm_file, pgoff, vma_policy(vma),    in mlock_fixup()

internal.h
     347  pgoff_t pgoff = page_to_pgoff(page);    in __vma_address() local
     348  return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);    in __vma_address()

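__vma_address() in internal.h (like retract_page_tables() in khugepaged.c above) computes the inverse: starting from a page's file offset, recover the virtual address where that page is mapped in a given VMA. A minimal sketch of that inverse step, again with illustrative names rather than the kernel's definitions:

/*
 * Simplified model of the pgoff-to-address computation used in
 * __vma_address() and retract_page_tables() above. Names are stand-ins.
 */
#include <assert.h>

#define MODEL_PAGE_SHIFT 12UL

struct vma_model {
	unsigned long vm_start;   /* first mapped virtual address */
	unsigned long vm_pgoff;   /* file offset of vm_start, in pages */
};

/* Virtual address inside the VMA that maps file page offset pgoff. */
static unsigned long pgoff_to_addr(const struct vma_model *vma, unsigned long pgoff)
{
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << MODEL_PAGE_SHIFT);
}

int main(void)
{
	struct vma_model vma = { .vm_start = 0x7f0000000000UL, .vm_pgoff = 16 };

	/* File page 19 sits three pages past vm_start in this mapping. */
	assert(pgoff_to_addr(&vma, 19) ==
	       vma.vm_start + (3UL << MODEL_PAGE_SHIFT));
	return 0;
}
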
shmem.c
    2023  vmf->pgoff >= shmem_falloc->start &&    in shmem_fault()
    2024  vmf->pgoff < shmem_falloc->next) {    in shmem_fault()
    2066  err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,    in shmem_fault()
    2075  unsigned long pgoff, unsigned long flags)    in shmem_get_unmapped_area() argument
    2089  addr = get_area(file, uaddr, len, pgoff, flags);    in shmem_get_unmapped_area()
    2134  offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);    in shmem_get_unmapped_area()
    2317  pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);    in shmem_mfill_atomic_pte() local
    2331  page = shmem_alloc_page(gfp, info, pgoff);    in shmem_mfill_atomic_pte()
    2372  ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,    in shmem_mfill_atomic_pte()
    4071  unsigned long pgoff, unsigned long flags)    in shmem_get_unmapped_area() argument
    [all …]

mempolicy.c
     738  pgoff_t pgoff;    in mbind_range() local
     758  pgoff = vma->vm_pgoff +    in mbind_range()
     761  vma->anon_vma, vma->vm_file, pgoff,    in mbind_range()
    2391  unsigned long pgoff;    in mpol_misplaced() local
    2403  pgoff = vma->vm_pgoff;    in mpol_misplaced()
    2404  pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;    in mpol_misplaced()
    2405  polnid = offset_il_node(pol, pgoff);    in mpol_misplaced()

madvise.c
      71  pgoff_t pgoff;    in madvise_behavior() local
     134  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);    in madvise_behavior()
     136  vma->vm_file, pgoff, vma_policy(vma),    in madvise_behavior()

util.c
     485  unsigned long flag, unsigned long pgoff)    in vm_mmap_pgoff() argument
     496  ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,    in vm_mmap_pgoff()

filemap.c
    2389  pgoff_t offset = vmf->pgoff;    in do_sync_mmap_readahead()
    2438  pgoff_t offset = vmf->pgoff;    in do_async_mmap_readahead()
    2484  pgoff_t offset = vmf->pgoff;    in filemap_fault()

huge_memory.c
     567  unsigned long len, unsigned long pgoff, unsigned long flags)    in thp_get_unmapped_area() argument
     570  loff_t off = (loff_t)pgoff << PAGE_SHIFT;    in thp_get_unmapped_area()
     579  return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);    in thp_get_unmapped_area()

memcontrol.c
    5366  pgoff_t pgoff;    in mc_handle_file_pte() local
    5374  pgoff = linear_page_index(vma, addr);    in mc_handle_file_pte()
    5380  page = find_get_entry(mapping, pgoff);    in mc_handle_file_pte()
    5389  page = find_get_page(mapping, pgoff);    in mc_handle_file_pte()
    5391  page = find_get_page(mapping, pgoff);    in mc_handle_file_pte()

hugetlb.c
    3677  pgoff_t pgoff;    in unmap_ref_private() local
    3684  pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +    in unmap_ref_private()
    3694  vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {    in unmap_ref_private()

vmalloc.c
    3044  unsigned long pgoff)    in remap_vmalloc_range() argument
    3047  addr + (pgoff << PAGE_SHIFT),    in remap_vmalloc_range()

memory.c
    3456  pgoff_t start_pgoff = vmf->pgoff;    in do_fault_around()
    3935  .pgoff = linear_page_index(vma, address),    in __handle_mm_fault()