/mm/
madvise.c
     47  unsigned long start, unsigned long end, int behavior)   in madvise_behavior() argument
     86  error = ksm_madvise(vma, start, end, behavior, &new_flags);   in madvise_behavior()
    103  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);   in madvise_behavior()
    104  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,   in madvise_behavior()
    114  if (start != vma->vm_start) {   in madvise_behavior()
    115  error = split_vma(mm, vma, start, 1);   in madvise_behavior()
    139  static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,   in swapin_walk_pmd_entry() argument
    149  for (index = start; index != end; index += PAGE_SIZE) {   in swapin_walk_pmd_entry()
    155  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);   in swapin_walk_pmd_entry()
    156  pte = *(orig_pte + ((index - start) / PAGE_SIZE));   in swapin_walk_pmd_entry()
    [all …]
mlock.c
    228  unsigned long start, unsigned long end, int *nonblocking)   in __mlock_vma_pages_range() argument
    231  unsigned long nr_pages = (end - start) / PAGE_SIZE;   in __mlock_vma_pages_range()
    234  VM_BUG_ON(start & ~PAGE_MASK);   in __mlock_vma_pages_range()
    236  VM_BUG_ON_VMA(start < vma->vm_start, vma);   in __mlock_vma_pages_range()
    260  return __get_user_pages(current, mm, start, nr_pages, gup_flags,   in __mlock_vma_pages_range()
    417  struct vm_area_struct *vma, int zoneid, unsigned long start,   in __munlock_pagevec_fill() argument
    428  pte = get_locked_pte(vma->vm_mm, start, &ptl);   in __munlock_pagevec_fill()
    430  end = pgd_addr_end(start, end);   in __munlock_pagevec_fill()
    431  end = pud_addr_end(start, end);   in __munlock_pagevec_fill()
    432  end = pmd_addr_end(start, end);   in __munlock_pagevec_fill()
    [all …]
bootmem.c
     96  unsigned long mapstart, unsigned long start, unsigned long end)   in init_bootmem_core() argument
    100  mminit_validate_memmodel_limits(&start, &end);   in init_bootmem_core()
    102  bdata->node_min_pfn = start;   in init_bootmem_core()
    110  mapsize = bootmap_bytes(end - start);   in init_bootmem_core()
    114  bdata - bootmem_node_data, start, mapstart, end, mapsize);   in init_bootmem_core()
    141  unsigned long __init init_bootmem(unsigned long start, unsigned long pages)   in init_bootmem() argument
    144  min_low_pfn = start;   in init_bootmem()
    145  return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);   in init_bootmem()
    175  unsigned long *map, start, end, pages, cur, count = 0;   in free_all_bootmem_core() local
    181  start = bdata->node_min_pfn;   in free_all_bootmem_core()
    [all …]
msync.c
    31  SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)   in SYSCALL_DEFINE3() argument
    41  if (start & ~PAGE_MASK)   in SYSCALL_DEFINE3()
    47  end = start + len;   in SYSCALL_DEFINE3()
    48  if (end < start)   in SYSCALL_DEFINE3()
    51  if (end == start)   in SYSCALL_DEFINE3()
    58  vma = find_vma(mm, start);   in SYSCALL_DEFINE3()
    68  if (start < vma->vm_start) {   in SYSCALL_DEFINE3()
    69  start = vma->vm_start;   in SYSCALL_DEFINE3()
    70  if (start >= end)   in SYSCALL_DEFINE3()
    81  fstart = (start - vma->vm_start) +   in SYSCALL_DEFINE3()
    [all …]
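The msync() hits above show the usual validation of a user-supplied (start, len) pair: reject an unaligned start, round the length up to whole pages, and treat a wrapped end as an invalid range. A minimal standalone sketch of that arithmetic follows; range_is_valid() and the 4 KiB page constants are illustrative, not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/*
 * Userspace sketch of the range checks visible in the msync() listing:
 * reject an unaligned start, round len up to whole pages, then compute
 * end = start + len and treat wrap-around (end < start) as invalid.
 */
static bool range_is_valid(unsigned long start, size_t len,
                           unsigned long *end_out)
{
    unsigned long end;

    if (start & ~PAGE_MASK)                 /* start must be page aligned */
        return false;
    len = (len + ~PAGE_MASK) & PAGE_MASK;   /* round length up to pages */
    end = start + len;
    if (end < start)                        /* overflow: range wraps */
        return false;
    *end_out = end;
    return true;
}

int main(void)
{
    unsigned long end;

    printf("%d\n", range_is_valid(0x1000, 0x2000, &end));                  /* 1 */
    printf("%d\n", range_is_valid(0x1001, 0x2000, &end));                  /* 0: unaligned */
    printf("%d\n", range_is_valid(~0UL & PAGE_MASK, PAGE_SIZE * 2, &end)); /* 0: wraps */
    return 0;
}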
fremap.c
    142  SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,   in SYSCALL_DEFINE5() argument
    161  start = start & PAGE_MASK;   in SYSCALL_DEFINE5()
    165  if (start + size <= start)   in SYSCALL_DEFINE5()
    181  vma = find_vma(mm, start);   in SYSCALL_DEFINE5()
    194  if (start < vma->vm_start || start + size > vma->vm_end)   in SYSCALL_DEFINE5()
    207  if (pgoff == linear_page_index(vma, start)) {   in SYSCALL_DEFINE5()
    231  addr = mmap_region(file, start, size, vm_flags, pgoff);   in SYSCALL_DEFINE5()
    236  BUG_ON(addr != start);   in SYSCALL_DEFINE5()
    257  munlock_vma_pages_range(vma, start, start + size);   in SYSCALL_DEFINE5()
    261  mmu_notifier_invalidate_range_start(mm, start, start + size);   in SYSCALL_DEFINE5()
    [all …]
nobootmem.c
     85  static void __init __free_pages_memory(unsigned long start, unsigned long end)   in __free_pages_memory() argument
     89  while (start < end) {   in __free_pages_memory()
     90  order = min(MAX_ORDER - 1UL, __ffs(start));   in __free_pages_memory()
     92  while (start + (1UL << order) > end)   in __free_pages_memory()
     95  __free_pages_bootmem(pfn_to_page(start), start, order);   in __free_pages_memory()
     97  start += (1UL << order);   in __free_pages_memory()
    101  static unsigned long __init __free_memory_core(phys_addr_t start,   in __free_memory_core() argument
    104  unsigned long start_pfn = PFN_UP(start);   in __free_memory_core()
    119  phys_addr_t start, end;   in free_low_memory_core_early() local
    124  for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)   in free_low_memory_core_early()
    [all …]
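The __free_pages_memory() hits show the PFN range being released in the largest naturally aligned power-of-two chunks, with the alignment of start (via __ffs()) bounding the usable order. A standalone sketch of that chunking loop, assuming __builtin_ctzl() as a stand-in for __ffs() and a printf() placeholder for the actual __free_pages_bootmem() call:

#include <stdio.h>

#define MAX_ORDER 11    /* illustrative; matches the common kernel default */

/* Stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned long lowest_set_bit(unsigned long x)
{
    return (unsigned long)__builtin_ctzl(x);
}

/* Placeholder for __free_pages_bootmem(pfn_to_page(start), start, order). */
static void emit_block(unsigned long start, unsigned long order)
{
    printf("free %lu pages at pfn %#lx (order %lu)\n",
           1UL << order, start, order);
}

/*
 * Walk [start, end) in the largest naturally aligned power-of-two block
 * that still fits, so each block can be handed to the buddy allocator as
 * a single high-order page.
 */
static void free_pfn_range(unsigned long start, unsigned long end)
{
    unsigned long order;

    while (start < end) {
        /* alignment of 'start' bounds the largest usable order */
        order = start ? lowest_set_bit(start) : MAX_ORDER - 1UL;
        if (order > MAX_ORDER - 1UL)
            order = MAX_ORDER - 1UL;

        /* shrink the block until it fits inside the range */
        while (start + (1UL << order) > end)
            order--;

        emit_block(start, order);
        start += 1UL << order;
    }
}

int main(void)
{
    free_pfn_range(0x103, 0x400);    /* deliberately unaligned start */
    return 0;
}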
mprotect.c
    217  unsigned long start = addr;   in change_protection_range() local
    234  flush_tlb_range(vma, start, end);   in change_protection_range()
    240  unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,   in change_protection() argument
    247  pages = hugetlb_change_protection(vma, start, end, newprot);   in change_protection()
    249  pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);   in change_protection()
    256  unsigned long start, unsigned long end, unsigned long newflags)   in mprotect_fixup() argument
    260  long nrpages = (end - start) >> PAGE_SHIFT;   in mprotect_fixup()
    290  pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);   in mprotect_fixup()
    291  *pprev = vma_merge(mm, *pprev, start, end, newflags,   in mprotect_fixup()
    301  if (start != vma->vm_start) {   in mprotect_fixup()
    [all …]
readahead.c
    367  ra->start = offset;   in try_context_readahead()
    396  if ((offset == (ra->start + ra->size - ra->async_size) ||   in ondemand_readahead()
    397  offset == (ra->start + ra->size))) {   in ondemand_readahead()
    398  ra->start += ra->size;   in ondemand_readahead()
    411  pgoff_t start;   in ondemand_readahead() local
    414  start = page_cache_next_hole(mapping, offset + 1, max);   in ondemand_readahead()
    417  if (!start || start - offset > max)   in ondemand_readahead()
    420  ra->start = start;   in ondemand_readahead()
    421  ra->size = start - offset;	/* old async_size */   in ondemand_readahead()
    457  ra->start = offset;   in ondemand_readahead()
    [all …]
gup.c
    422  unsigned long start, unsigned long nr_pages,   in __get_user_pages() argument
    449  if (!vma || start >= vma->vm_end) {   in __get_user_pages()
    450  vma = find_extend_vma(mm, start);   in __get_user_pages()
    451  if (!vma && in_gate_area(mm, start)) {   in __get_user_pages()
    453  ret = get_gate_page(mm, start & PAGE_MASK,   in __get_user_pages()
    466  &start, &nr_pages, i,   in __get_user_pages()
    479  page = follow_page_mask(vma, start, foll_flags, &page_mask);   in __get_user_pages()
    482  ret = faultin_page(tsk, vma, start, &foll_flags,   in __get_user_pages()
    502  flush_anon_page(vma, page, start);   in __get_user_pages()
    511  page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);   in __get_user_pages()
    [all …]
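Line 511 decides how far start can advance after one follow_page_mask() call; as I read it, page_mask is the number of base pages in the returned page minus one, so the expression counts the base pages left before the next huge-page boundary. A small self-checking sketch of just that formula, with illustrative constants:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Sketch of the arithmetic on gup.c line 511: with page_mask set to
 * (pages per huge page) - 1, the result is how many base pages lie
 * between 'start' and the end of the huge page it falls in. This is a
 * standalone check of the formula, not kernel code.
 */
static unsigned long pages_to_boundary(unsigned long start,
                                       unsigned long page_mask)
{
    return 1 + (~(start >> PAGE_SHIFT) & page_mask);
}

int main(void)
{
    unsigned long huge_pages = 512;         /* 2 MiB huge page of 4 KiB pages */
    unsigned long mask = huge_pages - 1;

    /* at a huge-page boundary the whole huge page is still ahead */
    assert(pages_to_boundary(0x200000, mask) == 512);
    /* one base page before the boundary, only one page remains */
    assert(pages_to_boundary(0x200000 + 511 * 4096, mask) == 1);
    /* for order-0 pages (mask 0) the step is always a single page */
    assert(pages_to_boundary(0x1234000, 0) == 1);

    printf("all checks passed\n");
    return 0;
}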
memory_hotplug.c
     130  static struct resource *register_memory_resource(u64 start, u64 size)   in register_memory_resource() argument
     137  res->start = start;   in register_memory_resource()
     138  res->end = start + size - 1;   in register_memory_resource()
     761  resource_size_t start, size;   in __remove_pages() local
     770  start = phys_start_pfn << PAGE_SHIFT;   in __remove_pages()
     772  ret = release_mem_region_adjustable(&iomem_resource, start, size);   in __remove_pages()
     774  resource_size_t endres = start + size - 1;   in __remove_pages()
     777  &start, &endres, ret);   in __remove_pages()
    1081  static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)   in hotadd_new_pgdat() argument
    1086  unsigned long start_pfn = PFN_DOWN(start);   in hotadd_new_pgdat()
    [all …]
debug-pagealloc.c
    51  unsigned char *start;   in check_poison_mem() local
    54  start = memchr_inv(mem, PAGE_POISON, bytes);   in check_poison_mem()
    55  if (!start)   in check_poison_mem()
    58  for (end = mem + bytes - 1; end > start; end--) {   in check_poison_mem()
    65  else if (start == end && single_bit_flip(*start, PAGE_POISON))   in check_poison_mem()
    70  print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,   in check_poison_mem()
    71  end - start + 1, 1);   in check_poison_mem()
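check_poison_mem() narrows a poison-check failure down to the first and last bytes that differ from the poison pattern, and treats a lone one-bit difference as a separate case. A standalone sketch of the same scan, assuming first_mismatch() as a stand-in for the kernel's memchr_inv(), printf() in place of the rate-limited hex dump, and an illustrative poison value:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_POISON 0xaa    /* illustrative poison byte */

/* Userspace stand-in for memchr_inv(): first byte != c, or NULL. */
static unsigned char *first_mismatch(unsigned char *mem, unsigned char c,
                                     size_t bytes)
{
    for (size_t i = 0; i < bytes; i++)
        if (mem[i] != c)
            return mem + i;
    return NULL;
}

/* True if 'byte' differs from the poison value in exactly one bit. */
static bool single_bit_flip(unsigned char byte, unsigned char poison)
{
    unsigned char diff = byte ^ poison;

    return diff && !(diff & (diff - 1));
}

/*
 * Locate the first and last corrupted bytes so only that window is
 * reported, and classify a lone one-bit difference separately (more
 * likely a memory error than a stray write).
 */
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
    unsigned char *start = first_mismatch(mem, PAGE_POISON, bytes);
    unsigned char *end;

    if (!start)
        return;        /* page is still fully poisoned */

    for (end = mem + bytes - 1; end > start; end--)
        if (*end != PAGE_POISON)
            break;

    if (start == end && single_bit_flip(*start, PAGE_POISON))
        printf("single bit flip at offset %td\n", start - mem);
    else
        printf("corruption: %td byte(s) starting at offset %td\n",
               end - start + 1, start - mem);
}

int main(void)
{
    unsigned char page[64];

    memset(page, PAGE_POISON, sizeof(page));
    page[10] ^= 0x04;                       /* flip a single bit */
    check_poison_mem(page, sizeof(page));

    memset(page, PAGE_POISON, sizeof(page));
    memcpy(page + 20, "oops", 4);           /* overwrite a small window */
    check_poison_mem(page, sizeof(page));
    return 0;
}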
mempolicy.c
    658  queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,   in queue_pages_range() argument
    664  vma = find_vma(mm, start);   in queue_pages_range()
    673  if (vma->vm_start > start)   in queue_pages_range()
    674  start = vma->vm_start;   in queue_pages_range()
    686  change_prot_numa(vma, start, endvma);   in queue_pages_range()
    694  err = queue_pages_pgd_range(vma, start, endvma, nodes,   in queue_pages_range()
    742  static int mbind_range(struct mm_struct *mm, unsigned long start,   in mbind_range() argument
    753  vma = find_vma(mm, start);   in mbind_range()
    754  if (!vma || vma->vm_start > start)   in mbind_range()
    758  if (start > vma->vm_start)   in mbind_range()
    [all …]
truncate.c
    246  pgoff_t start;	/* inclusive */   in truncate_inode_pages_range() local
    269  start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;   in truncate_inode_pages_range()
    281  index = start;   in truncate_inode_pages_range()
    315  struct page *page = find_lock_page(mapping, start - 1);   in truncate_inode_pages_range()
    318  if (start > end) {   in truncate_inode_pages_range()
    350  if (start >= end)   in truncate_inode_pages_range()
    353  index = start;   in truncate_inode_pages_range()
    359  if (index == start)   in truncate_inode_pages_range()
    362  index = start;   in truncate_inode_pages_range()
    365  if (index == start && indices[0] >= end) {   in truncate_inode_pages_range()
    [all …]
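Line 269 converts the byte offset lstart into the index of the first page that lies wholly at or beyond it, i.e. a round-up divide by the page-cache size, so a partially covered first page can be handled separately from the whole pages that follow. A tiny self-checking sketch of that index math, with illustrative 4 KiB constants:

#include <assert.h>
#include <stdio.h>

/* Illustrative values; PAGE_CACHE_SIZE tracked PAGE_SIZE in kernels of this era. */
#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

/* First page index that starts at or after the byte offset lstart (round up). */
static unsigned long first_whole_page(unsigned long lstart)
{
    return (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

int main(void)
{
    assert(first_whole_page(0) == 0);       /* aligned start: page 0 is whole */
    assert(first_whole_page(1) == 1);       /* page 0 only partially covered */
    assert(first_whole_page(4096) == 1);    /* exactly one page in: page 1 */
    assert(first_whole_page(4097) == 2);
    printf("ok\n");
    return 0;
}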
nommu.c
    150  unsigned long start, unsigned long nr_pages,   in __get_user_pages() argument
    167  vma = find_vma(mm, start);   in __get_user_pages()
    177  pages[i] = virt_to_page(start);   in __get_user_pages()
    183  start = (start + PAGE_SIZE) & PAGE_MASK;   in __get_user_pages()
    200  unsigned long start, unsigned long nr_pages,   in get_user_pages() argument
    211  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,   in get_user_pages()
    691  long start = vma->vm_start & PAGE_MASK;   in protect_vma() local
    692  while (start < vma->vm_end) {   in protect_vma()
    693  protect_page(mm, start, flags);   in protect_vma()
    694  start += PAGE_SIZE;   in protect_vma()
    [all …]
vmalloc.c
    178  static int vmap_page_range_noflush(unsigned long start, unsigned long end,   in vmap_page_range_noflush() argument
    183  unsigned long addr = start;   in vmap_page_range_noflush()
    199  static int vmap_page_range(unsigned long start, unsigned long end,   in vmap_page_range() argument
    204  ret = vmap_page_range_noflush(start, end, prot, pages);   in vmap_page_range()
    205  flush_cache_vmap(start, end);   in vmap_page_range()
    526  static void vmap_debug_free_range(unsigned long start, unsigned long end)   in vmap_debug_free_range() argument
    542  vunmap_page_range(start, end);   in vmap_debug_free_range()
    543  flush_tlb_kernel_range(start, end);   in vmap_debug_free_range()
    596  static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,   in __purge_vmap_area_lazy() argument
    622  if (va->va_start < *start)   in __purge_vmap_area_lazy()
    [all …]
memblock.c
    117  __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,   in __memblock_find_range_bottom_up() argument
    124  this_start = clamp(this_start, start, end);   in __memblock_find_range_bottom_up()
    125  this_end = clamp(this_end, start, end);   in __memblock_find_range_bottom_up()
    149  __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,   in __memblock_find_range_top_down() argument
    156  this_start = clamp(this_start, start, end);   in __memblock_find_range_top_down()
    157  this_end = clamp(this_end, start, end);   in __memblock_find_range_top_down()
    192  phys_addr_t align, phys_addr_t start,   in memblock_find_in_range_node() argument
    202  start = max_t(phys_addr_t, start, PAGE_SIZE);   in memblock_find_in_range_node()
    203  end = max(start, end);   in memblock_find_in_range_node()
    214  bottom_up_start = max(start, kernel_end);   in memblock_find_in_range_node()
    [all …]
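The bottom-up finder intersects each candidate free range with the requested [start, end) window by clamping both endpoints, then takes the first aligned base that still leaves room for the allocation. A standalone sketch of that search over a toy free-range table; the table and helper names are stand-ins for memblock's real structures, not its API:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

struct range { phys_addr_t base, end; };    /* [base, end), illustrative */

static phys_addr_t clamp_addr(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static phys_addr_t round_up_to(phys_addr_t v, phys_addr_t align)
{
    return (v + align - 1) & ~(align - 1);
}

/*
 * For each free range: clamp it into the requested window, round the
 * candidate base up to the alignment, and accept it if 'size' bytes
 * still fit before the clamped end.
 */
static phys_addr_t find_range_bottom_up(const struct range *free_tbl, int nr,
                                        phys_addr_t start, phys_addr_t end,
                                        phys_addr_t size, phys_addr_t align)
{
    for (int i = 0; i < nr; i++) {
        phys_addr_t this_start = clamp_addr(free_tbl[i].base, start, end);
        phys_addr_t this_end   = clamp_addr(free_tbl[i].end,  start, end);
        phys_addr_t cand = round_up_to(this_start, align);

        if (cand < this_end && this_end - cand >= size)
            return cand;
    }
    return 0;    /* nothing found */
}

int main(void)
{
    struct range free_tbl[] = {
        { 0x00001000, 0x00003000 },
        { 0x00100000, 0x00800000 },
    };
    phys_addr_t got = find_range_bottom_up(free_tbl, 2,
                                           0x00002000, 0x01000000,
                                           0x10000, 0x10000);

    printf("candidate: %#llx\n", (unsigned long long)got);    /* 0x100000 */
    return 0;
}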
sparse-vmemmap.c
     91  unsigned long start, unsigned long end)   in vmemmap_verify() argument
     98  "page_structs\n", start, end - 1);   in vmemmap_verify()
    151  int __meminit vmemmap_populate_basepages(unsigned long start,   in vmemmap_populate_basepages() argument
    154  unsigned long addr = start;   in vmemmap_populate_basepages()
    181  unsigned long start;   in sparse_mem_map_populate() local
    186  start = (unsigned long)map;   in sparse_mem_map_populate()
    189  if (vmemmap_populate(start, end, nid))   in sparse_mem_map_populate()
kmemleak.c
     124  unsigned long start;   member
     654  unsigned long start, end;   in delete_object_part() local
     673  start = object->pointer;   in delete_object_part()
     675  if (ptr > start)   in delete_object_part()
     676  create_object(start, ptr - start, object->min_count,   in delete_object_part()
     769  area->start = ptr;   in add_scan_area()
    1152  unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);   in scan_block() local
    1155  for (ptr = start; ptr < end; ptr++) {   in scan_block()
    1233  void *start = (void *)object->pointer;   in scan_object() local
    1236  while (start < end && (object->flags & OBJECT_ALLOCATED) &&   in scan_object()
    [all …]
mmu_notifier.c
    110  unsigned long start,   in __mmu_notifier_clear_flush_young() argument
    119  young |= mn->ops->clear_flush_young(mn, mm, start, end);   in __mmu_notifier_clear_flush_young()
    174  unsigned long start, unsigned long end)   in __mmu_notifier_invalidate_range_start() argument
    182  mn->ops->invalidate_range_start(mn, mm, start, end);   in __mmu_notifier_invalidate_range_start()
    189  unsigned long start, unsigned long end)   in __mmu_notifier_invalidate_range_end() argument
    197  mn->ops->invalidate_range_end(mn, mm, start, end);   in __mmu_notifier_invalidate_range_end()
page_cgroup.c
    196  unsigned long start, end, pfn;   in online_page_cgroup() local
    199  start = SECTION_ALIGN_DOWN(start_pfn);   in online_page_cgroup()
    212  for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {   in online_page_cgroup()
    221  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)   in online_page_cgroup()
    230  unsigned long start, end, pfn;   in offline_page_cgroup() local
    232  start = SECTION_ALIGN_DOWN(start_pfn);   in offline_page_cgroup()
    235  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)   in offline_page_cgroup()
iov_iter.c
    448  size_t *start)   in get_pages_iovec() argument
    463  len += *start = addr & (PAGE_SIZE - 1);   in get_pages_iovec()
    471  return (res == n ? len : res * PAGE_SIZE) - *start;   in get_pages_iovec()
    476  size_t *start)   in get_pages_alloc_iovec() argument
    492  len += *start = addr & (PAGE_SIZE - 1);   in get_pages_alloc_iovec()
    508  return (res == n ? len : res * PAGE_SIZE) - *start;   in get_pages_alloc_iovec()
    778  size_t *start)   in get_pages_bvec() argument
    787  *start = bvec->bv_offset + i->iov_offset;   in get_pages_bvec()
    796  size_t *start)   in get_pages_alloc_bvec() argument
    804  *start = bvec->bv_offset + i->iov_offset;   in get_pages_alloc_bvec()
    [all …]
memory.c
    216  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned lon…   in tlb_gather_mmu() argument
    221  tlb->fullmm = !(start | (end+1));   in tlb_gather_mmu()
    269  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)   in tlb_finish_mmu() argument
    405  unsigned long start;   in free_pmd_range() local
    407  start = addr;   in free_pmd_range()
    416  start &= PUD_MASK;   in free_pmd_range()
    417  if (start < floor)   in free_pmd_range()
    427  pmd = pmd_offset(pud, start);   in free_pmd_range()
    429  pmd_free_tlb(tlb, pmd, start);   in free_pmd_range()
    438  unsigned long start;   in free_pud_range() local
    [all …]
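Line 221 relies on the convention that a whole-address-space teardown is requested as start == 0 and end == ~0UL, so a single OR of start with end + 1 is zero exactly in that case and replaces two explicit comparisons. A small self-checking sketch of the test:

#include <assert.h>
#include <stdio.h>

/* Whole-mm range is (0, ~0UL); end + 1 wraps to 0 only for that end value. */
static int is_full_mm(unsigned long start, unsigned long end)
{
    return !(start | (end + 1));
}

int main(void)
{
    assert(is_full_mm(0, ~0UL) == 1);           /* full address space */
    assert(is_full_mm(0, 0x7fff0000UL) == 0);   /* partial range */
    assert(is_full_mm(0x1000, ~0UL) == 0);      /* nonzero start */
    printf("ok\n");
    return 0;
}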
shmem.c
     92  pgoff_t start;	/* start of range currently being fallocated */   member
    404  pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;   in shmem_undo_range() local
    418  index = start;   in shmem_undo_range()
    458  shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);   in shmem_undo_range()
    461  if (start > end) {   in shmem_undo_range()
    481  if (start >= end)   in shmem_undo_range()
    484  index = start;   in shmem_undo_range()
    493  if (index == start || end != -1)   in shmem_undo_range()
    496  index = start;   in shmem_undo_range()
    797  index >= shmem_falloc->start &&   in shmem_writepage()
    [all …]
mincore.c
    268  SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,   in SYSCALL_DEFINE3() argument
    276  if (start & ~PAGE_CACHE_MASK)   in SYSCALL_DEFINE3()
    280  if (!access_ok(VERIFY_READ, (void __user *) start, len))   in SYSCALL_DEFINE3()
    301  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);   in SYSCALL_DEFINE3()
    312  start += retval << PAGE_SHIFT;   in SYSCALL_DEFINE3()
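The mincore() hits show the result vector being produced in passes of at most PAGE_SIZE pages at a time (one temporary page of status bytes per pass), with start advanced by however many pages the pass covered. A standalone sketch of that chunking loop; process_chunk() is a placeholder for the real do_mincore() plus copy_to_user() step, and the constants are illustrative:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Placeholder for the per-chunk work; returns pages handled or an error. */
static long process_chunk(unsigned long start, unsigned long nr_pages)
{
    printf("chunk at %#lx covering %lu pages\n", start, nr_pages);
    return (long)nr_pages;    /* pretend every page was handled */
}

/*
 * Walk 'pages' pages starting at 'start', at most PAGE_SIZE pages per
 * pass, advancing the address by the number of pages actually handled.
 */
static long walk_in_chunks(unsigned long start, unsigned long pages)
{
    long done = 0;

    while (pages) {
        unsigned long this = pages < PAGE_SIZE ? pages : PAGE_SIZE;
        long retval = process_chunk(start, this);

        if (retval <= 0)
            return retval;
        done  += retval;
        pages -= (unsigned long)retval;
        start += (unsigned long)retval << PAGE_SHIFT;
    }
    return done;
}

int main(void)
{
    /* 10000 pages: three passes of 4096, 4096 and 1808 pages */
    walk_in_chunks(0x10000000UL, 10000);
    return 0;
}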
sparse.c
    170  void __init memory_present(int nid, unsigned long start, unsigned long end)   in memory_present() argument
    174  start &= PAGE_SECTION_MASK;   in memory_present()
    175  mminit_validate_memmodel_limits(&start, &end);   in memory_present()
    176  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {   in memory_present()
    605  unsigned long start = (unsigned long)memmap;   in __kfree_section_memmap() local
    608  vmemmap_free(start, end);   in __kfree_section_memmap()
    613  unsigned long start = (unsigned long)memmap;   in free_map_bootmem() local
    616  vmemmap_free(start, end);   in free_map_bootmem()