/mm/

pagewalk.c
    7 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in walk_pte_range() argument
    20 if (addr == end) in walk_pte_range()
    29 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
    40 next = pmd_addr_end(addr, end); in walk_pmd_range()
    70 } while (pmd++, addr = next, addr != end); in walk_pmd_range()
    75 static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in walk_pud_range() argument
    86 next = pud_addr_end(addr, end); in walk_pud_range()
    115 } while (pud++, addr = next, addr != end); in walk_pud_range()
    120 static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in walk_p4d_range() argument
    130 next = p4d_addr_end(addr, end); in walk_p4d_range()
    [all …]

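The walk_pmd_range()/walk_pud_range() hits above all follow the kernel's half-open [addr, end) page-table walk idiom: step to the next table boundary, clamp it to end, and loop until addr catches up. A minimal userspace sketch of that idiom follows; PMD_SIZE_DEMO and pmd_addr_end_demo() are illustrative stand-ins, not kernel API.

/*
 * Sketch of the [addr, end) walk loop seen in walk_pmd_range() above.
 * Assumes addr < end on entry, as the kernel walkers do.
 */
#include <stdio.h>

#define PMD_SIZE_DEMO (1UL << 21)             /* 2 MiB, a typical PMD span */
#define PMD_MASK_DEMO (~(PMD_SIZE_DEMO - 1))

/* Next PMD boundary after addr, clamped to end; mirrors the pmd_addr_end() pattern. */
static unsigned long pmd_addr_end_demo(unsigned long addr, unsigned long end)
{
    unsigned long boundary = (addr + PMD_SIZE_DEMO) & PMD_MASK_DEMO;

    return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
    unsigned long addr = 0x200000UL - 0x3000UL;   /* unaligned start */
    unsigned long end  = 0x200000UL + 0x5000UL;   /* exclusive end */
    unsigned long next;

    do {
        next = pmd_addr_end_demo(addr, end);
        /* a real walker would visit the PTEs covering [addr, next) here */
        printf("chunk [%#lx, %#lx)\n", addr, next);
    } while (addr = next, addr != end);

    return 0;
}
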
madvise.c
    67 unsigned long start, unsigned long end, int behavior) in madvise_behavior() argument
    117 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
    135 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
    155 if (end != vma->vm_end) { in madvise_behavior()
    160 error = __split_vma(mm, vma, end, 0); in madvise_behavior()
    184 unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument
    193 for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
    223 unsigned long start, unsigned long end, in force_shm_swapin_readahead() argument
    230 for (; start < end; start += PAGE_SIZE) { in force_shm_swapin_readahead()
    255 unsigned long start, unsigned long end) in madvise_willneed() argument
    [all …]

mprotect.c
    39 unsigned long addr, unsigned long end, pgprot_t newprot, in change_pte_range() argument
    157 } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
    165 pud_t *pud, unsigned long addr, unsigned long end, in change_pmd_range() argument
    180 next = pmd_addr_end(addr, end); in change_pmd_range()
    189 vma, vma->vm_mm, addr, end); in change_pmd_range()
    217 } while (pmd++, addr = next, addr != end); in change_pmd_range()
    228 p4d_t *p4d, unsigned long addr, unsigned long end, in change_pud_range() argument
    237 next = pud_addr_end(addr, end); in change_pud_range()
    242 } while (pud++, addr = next, addr != end); in change_pud_range()
    248 pgd_t *pgd, unsigned long addr, unsigned long end, in change_p4d_range() argument
    [all …]

memblock.c
    190 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
    198 this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
    199 this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
    225 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
    234 this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
    235 this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
    273 phys_addr_t end, int nid, in memblock_find_in_range_node() argument
    279 if (end == MEMBLOCK_ALLOC_ACCESSIBLE || in memblock_find_in_range_node()
    280 end == MEMBLOCK_ALLOC_KASAN) in memblock_find_in_range_node()
    281 end = memblock.current_limit; in memblock_find_in_range_node()
    [all …]

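The __memblock_find_range_bottom_up() hits above show the clamp-and-intersect pattern: each candidate free region is clipped to the caller's [start, end) window before checking whether an aligned allocation fits. The sketch below is illustrative only; struct free_region, clamp_u64() and find_bottom_up() are made-up names, not memblock API.

/* Sketch of bottom-up search for an aligned hole inside [start, end). */
#include <stdint.h>
#include <stdio.h>

struct free_region { uint64_t base, size; };

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static uint64_t align_up(uint64_t v, uint64_t align)
{
    return (v + align - 1) & ~(align - 1);
}

/* Return the lowest aligned address in [start, end) with room for 'size', or 0. */
static uint64_t find_bottom_up(const struct free_region *regions, int nr,
                               uint64_t size, uint64_t align,
                               uint64_t start, uint64_t end)
{
    for (int i = 0; i < nr; i++) {
        uint64_t this_start = regions[i].base;
        uint64_t this_end   = this_start + regions[i].size;
        uint64_t cand;

        this_start = clamp_u64(this_start, start, end);
        this_end   = clamp_u64(this_end, start, end);

        cand = align_up(this_start, align);
        if (cand < this_end && this_end - cand >= size)
            return cand;
    }
    return 0;
}

int main(void)
{
    struct free_region free_map[] = {
        { 0x1000,  0x2000 },    /* [0x1000, 0x3000)   */
        { 0x10000, 0x8000 },    /* [0x10000, 0x18000) */
    };

    /* first region is too small once clipped to [0x2000, 0x20000); prints 0x10000 */
    printf("%#llx\n", (unsigned long long)
           find_bottom_up(free_map, 2, 0x4000, 0x1000, 0x2000, 0x20000));
    return 0;
}
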
hmm.c
    109 if (nrange->end < range->start || nrange->start >= range->end) in hmm_invalidate_range_start()
    256 unsigned long end, in hmm_pfns_bad() argument
    265 for (; addr < end; addr += PAGE_SIZE, i++) in hmm_pfns_bad()
    283 static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end, in hmm_vma_walk_hole_() argument
    298 for (; addr < end; addr += PAGE_SIZE, i++) { in hmm_vma_walk_hole_()
    378 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, in hmm_vma_walk_hole() argument
    388 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
    392 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); in hmm_vma_walk_hole()
    406 unsigned long end, uint64_t *pfns, pmd_t pmd) in hmm_vma_handle_pmd() argument
    414 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
    [all …]

gup.c
    1189 unsigned long start, unsigned long end, int *nonblocking) in populate_vma_page_range() argument
    1192 unsigned long nr_pages = (end - start) / PAGE_SIZE; in populate_vma_page_range()
    1196 VM_BUG_ON(end & ~PAGE_MASK); in populate_vma_page_range()
    1198 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
    1237 unsigned long end, nstart, nend; in __mm_populate() local
    1242 end = start + len; in __mm_populate()
    1244 for (nstart = start; nstart < end; nstart = nend) { in __mm_populate()
    1255 if (!vma || vma->vm_start >= end) in __mm_populate()
    1261 nend = min(end, vma->vm_end); in __mm_populate()
    1811 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, in gup_pte_range() argument
    [all …]

truncate.c
    61 pgoff_t end) in truncate_exceptional_pvec_entries() argument
    78 lock = !dax && indices[j] < end; in truncate_exceptional_pvec_entries()
    91 if (index >= end) in truncate_exceptional_pvec_entries()
    295 pgoff_t end; /* exclusive */ in truncate_inode_pages_range() local
    323 end = -1; in truncate_inode_pages_range()
    325 end = (lend + 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
    329 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in truncate_inode_pages_range()
    330 min(end - index, (pgoff_t)PAGEVEC_SIZE), in truncate_inode_pages_range()
    345 if (index >= end) in truncate_inode_pages_range()
    369 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); in truncate_inode_pages_range()
    [all …]

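The truncate_inode_pages_range() hits above convert an inclusive byte offset lend (or -1 meaning "to end of file") into an exclusive page index with end = (lend + 1) >> PAGE_SHIFT. A small userspace illustration of that conversion; page_range_demo() and PAGE_SHIFT_DEMO are illustrative names, not kernel code.

/* Sketch: inclusive byte range [lstart, lend] -> exclusive page range [start, end). */
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12

static void page_range_demo(long long lstart, long long lend)
{
    unsigned long start = (lstart + (1 << PAGE_SHIFT_DEMO) - 1) >> PAGE_SHIFT_DEMO;
    unsigned long end;

    if (lend == -1)
        end = (unsigned long)-1;                    /* "until EOF" sentinel */
    else
        end = (lend + 1) >> PAGE_SHIFT_DEMO;        /* exclusive page index */

    printf("bytes [%lld, %lld] -> pages [%lu, %lu)\n", lstart, lend, start, end);
}

int main(void)
{
    page_range_demo(0, 8191);       /* exactly two pages: [0, 2) */
    page_range_demo(4096, -1);      /* page 1 to EOF (sentinel end) */
    return 0;
}
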
mincore.c
    25 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
    36 for (; addr != end; vec++, addr += PAGE_SIZE) in mincore_hugetlb()
    95 static int __mincore_unmapped_range(unsigned long addr, unsigned long end, in __mincore_unmapped_range() argument
    98 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
    114 static int mincore_unmapped_range(unsigned long addr, unsigned long end, in mincore_unmapped_range() argument
    117 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
    122 static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in mincore_pte_range() argument
    129 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
    139 __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range()
    144 for (; addr != end; ptep++, addr += PAGE_SIZE) { in mincore_pte_range()
    [all …]

mlock.c
    376 unsigned long start, unsigned long end) in __munlock_pagevec_fill() argument
    388 end = pgd_addr_end(start, end); in __munlock_pagevec_fill()
    389 end = p4d_addr_end(start, end); in __munlock_pagevec_fill()
    390 end = pud_addr_end(start, end); in __munlock_pagevec_fill()
    391 end = pmd_addr_end(start, end); in __munlock_pagevec_fill()
    395 while (start < end) { in __munlock_pagevec_fill()
    446 unsigned long start, unsigned long end) in munlock_vma_pages_range() argument
    450 while (start < end) { in munlock_vma_pages_range()
    498 zone, start, end); in munlock_vma_pages_range()
    520 unsigned long start, unsigned long end, vm_flags_t newflags) in mlock_fixup() argument
    [all …]

vmalloc.c
    63 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
    71 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
    74 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
    81 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
    87 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
    90 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
    97 next = pud_addr_end(addr, end); in vunmap_pud_range()
    103 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
    106 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_p4d_range() argument
    113 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
    [all …]

memtest.c
    37 u64 *p, *start, *end; in memtest() local
    44 end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
    48 for (p = start; p < end; p++) in memtest()
    51 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
    66 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
    73 this_start = clamp(this_start, start, end); in do_one_pass()
    74 this_end = clamp(this_end, start, end); in do_one_pass()
    100 void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
    111 do_one_pass(patterns[idx], start, end); in early_memtest()

memory.c
    224 unsigned long addr, unsigned long end, in free_pmd_range() argument
    234 next = pmd_addr_end(addr, end); in free_pmd_range()
    238 } while (pmd++, addr = next, addr != end); in free_pmd_range()
    248 if (end - 1 > ceiling - 1) in free_pmd_range()
    258 unsigned long addr, unsigned long end, in free_pud_range() argument
    268 next = pud_addr_end(addr, end); in free_pud_range()
    272 } while (pud++, addr = next, addr != end); in free_pud_range()
    282 if (end - 1 > ceiling - 1) in free_pud_range()
    292 unsigned long addr, unsigned long end, in free_p4d_range() argument
    302 next = p4d_addr_end(addr, end); in free_p4d_range()
    [all …]

msync.c
    34 unsigned long end; in SYSCALL_DEFINE3() local
    50 end = start + len; in SYSCALL_DEFINE3()
    51 if (end < start) in SYSCALL_DEFINE3()
    54 if (end == start) in SYSCALL_DEFINE3()
    73 if (start >= end) in SYSCALL_DEFINE3()
    86 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
    94 if (error || start >= end) in SYSCALL_DEFINE3()
    99 if (start >= end) { in SYSCALL_DEFINE3()

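The msync() hits above show the overflow-safe validation of a user-supplied range: end = start + len must not wrap past the top of the address space, and an empty range is accepted as a no-op. A hedged userspace sketch of that pattern; validate_range(), PAGE_MASK_DEMO and the specific error codes are illustrative choices, not the exact syscall behaviour.

/* Sketch: reject a wrapping start+len range, accept an empty one. */
#include <errno.h>
#include <stdio.h>

#define PAGE_MASK_DEMO (~((unsigned long)4096 - 1))

static int validate_range(unsigned long start, unsigned long len)
{
    unsigned long end;

    if (start & ~PAGE_MASK_DEMO)                    /* start must be page aligned */
        return -EINVAL;
    len = (len + ~PAGE_MASK_DEMO) & PAGE_MASK_DEMO; /* round len up to a page */
    end = start + len;
    if (end < start)                                /* wrapped around: reject */
        return -ENOMEM;
    if (end == start)                               /* empty range: nothing to do */
        return 0;
    return 1;                                       /* non-empty, sane range */
}

int main(void)
{
    printf("%d\n", validate_range(0x1000, 0x2000));                   /* 1 */
    printf("%d\n", validate_range(0x1000, 0));                        /* 0 */
    printf("%d\n", validate_range((unsigned long)-4096, 1UL << 20));  /* -ENOMEM */
    return 0;
}
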
mempolicy.c
    442 unsigned long end, struct mm_walk *walk) in queue_pages_pmd() argument
    491 unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument
    504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); in queue_pages_pte_range()
    514 for (; addr != end; pte++, addr += PAGE_SIZE) { in queue_pages_pte_range()
    551 return addr != end ? -EIO : 0; in queue_pages_pte_range()
    555 unsigned long addr, unsigned long end, in queue_pages_hugetlb() argument
    595 unsigned long addr, unsigned long end) in change_prot_numa() argument
    599 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
    607 unsigned long addr, unsigned long end) in change_prot_numa() argument
    613 static int queue_pages_test_walk(unsigned long start, unsigned long end, in queue_pages_test_walk() argument
    [all …]

page_poison.c
    70 unsigned char *end; in check_poison_mem() local
    79 for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
    80 if (*end != PAGE_POISON) in check_poison_mem()
    86 else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
    92 end - start + 1, 1); in check_poison_mem()

sparse-vmemmap.c
    133 unsigned long start, unsigned long end) in vmemmap_verify() argument
    140 start, end - 1); in vmemmap_verify()
    217 unsigned long end, int node) in vmemmap_populate_basepages() argument
    226 for (; addr < end; addr += PAGE_SIZE) { in vmemmap_populate_basepages()
    252 unsigned long end; in __populate_section_memmap() local
    259 end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION); in __populate_section_memmap()
    261 nr_pages = end - pfn; in __populate_section_memmap()
    264 end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()
    266 if (vmemmap_populate(start, end, nid, altmap)) in __populate_section_memmap()

percpu.c
    273 static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end) in pcpu_next_unpop() argument
    275 *rs = find_next_zero_bit(bitmap, end, *rs); in pcpu_next_unpop()
    276 *re = find_next_bit(bitmap, end, *rs + 1); in pcpu_next_unpop()
    279 static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end) in pcpu_next_pop() argument
    281 *rs = find_next_bit(bitmap, end, *rs); in pcpu_next_pop()
    282 *re = find_next_zero_bit(bitmap, end, *rs + 1); in pcpu_next_pop()
    290 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ argument
    291 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
    293 (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
    295 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ argument
    [all …]

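The pcpu_next_unpop()/pcpu_for_each_unpop_region() hits above pair find_next_zero_bit() and find_next_bit() to walk runs of unpopulated pages as [rs, re) regions bounded by end. A userspace sketch of the same iteration; next_zero()/next_set() are simple stand-ins for the kernel's bit-search helpers, and the byte-per-bit map is purely illustrative.

/* Sketch: iterate maximal runs of zero entries in a populated-page map. */
#include <stdio.h>

static int next_set(const unsigned char *bits, int end, int from)
{
    while (from < end && !bits[from])
        from++;
    return from;                    /* == end when no set entry remains */
}

static int next_zero(const unsigned char *bits, int end, int from)
{
    while (from < end && bits[from])
        from++;
    return from;
}

int main(void)
{
    /* 1 = populated, 0 = unpopulated */
    unsigned char map[] = { 1, 1, 0, 0, 1, 0, 0, 0, 1, 1 };
    int end = (int)sizeof(map), rs, re;

    /* mirrors pcpu_for_each_unpop_region(): prints [2, 4) and [5, 8) */
    for (rs = next_zero(map, end, 0), re = next_set(map, end, rs + 1);
         rs < end;
         rs = next_zero(map, end, re + 1), re = next_set(map, end, rs + 1))
        printf("unpopulated region [%d, %d)\n", rs, re);

    return 0;
}
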
usercopy.c
    164 const void *end = ptr + n - 1; in check_page_span() local
    176 end <= (const void *)__end_rodata) { in check_page_span()
    183 if (ptr >= (const void *)_sdata && end <= (const void *)_edata) in check_page_span()
    188 end <= (const void *)__bss_stop) in check_page_span()
    193 ((unsigned long)end & (unsigned long)PAGE_MASK))) in check_page_span()
    197 endpage = virt_to_head_page(end); in check_page_span()
    211 for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { in check_page_span()

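Unlike most of the exclusive ranges above, check_page_span() uses an inclusive end (ptr + n - 1): the copy stays within one page exactly when ptr and end fall in the same page frame. A minimal sketch of that check; spans_multiple_pages() and the _DEMO constants are illustrative names.

/* Sketch: does [ptr, ptr + n) cross a page boundary? */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_DEMO 4096UL
#define PAGE_MASK_DEMO (~(PAGE_SIZE_DEMO - 1))

static bool spans_multiple_pages(const void *ptr, unsigned long n)
{
    const void *end = (const char *)ptr + n - 1;    /* inclusive last byte */

    return ((unsigned long)ptr & PAGE_MASK_DEMO) !=
           ((unsigned long)end & PAGE_MASK_DEMO);
}

int main(void)
{
    printf("%d\n", spans_multiple_pages((void *)0x1000, 4096));  /* 0: one page  */
    printf("%d\n", spans_multiple_pages((void *)0x1ff0, 32));    /* 1: crosses   */
    return 0;
}
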
mmap.c
    78 unsigned long start, unsigned long end);
    529 unsigned long end, struct vm_area_struct **pprev, in find_vma_links() argument
    545 if (vma_tmp->vm_start < end) in find_vma_links()
    563 unsigned long addr, unsigned long end) in count_vma_pages_range() argument
    569 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
    573 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
    580 if (vma->vm_start > end) in count_vma_pages_range()
    583 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
    720 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, in __vma_adjust() argument
    736 if (end >= next->vm_end) { in __vma_adjust()
    [all …]

page_ext.c
    290 unsigned long start, end, pfn; in online_page_ext() local
    294 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
    306 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { in online_page_ext()
    315 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
    324 unsigned long start, end, pfn; in offline_page_ext() local
    327 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
    329 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()

percpu-stats.c
    56 int i, last_alloc, as_len, start, end; in chunk_map_stats() local
    89 end = find_next_bit(chunk->bound_map, last_alloc, in chunk_map_stats()
    93 end = find_next_bit(chunk->alloc_map, last_alloc, in chunk_map_stats()
    98 alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE; in chunk_map_stats()
    100 start = end; in chunk_map_stats()

mmu_notifier.c
    97 unsigned long end) in __mmu_notifier_clear_flush_young() argument
    105 young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
    114 unsigned long end) in __mmu_notifier_clear_young() argument
    122 young |= mn->ops->clear_young(mn, mm, start, end); in __mmu_notifier_clear_young()
    218 range->end); in __mmu_notifier_invalidate_range_end()
    232 unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range() argument
    240 mn->ops->invalidate_range(mn, mm, start, end); in __mmu_notifier_invalidate_range()

internal.h
    49 unsigned long addr, unsigned long end,
    297 unsigned long start, unsigned long end, int *nonblocking);
    299 unsigned long start, unsigned long end);
    354 unsigned long start, end; in vma_address() local
    357 end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1); in vma_address()
    360 VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma); in vma_address()

rodata_test.c
    17 unsigned long start, end; in rodata_test() local
    42 end = (unsigned long)__end_rodata; in rodata_test()
    47 if (end & (PAGE_SIZE - 1)) { in rodata_test()

/mm/kasan/ |
init.c
    97 unsigned long end) in zero_pte_populate() argument
    106 while (addr + PAGE_SIZE <= end) { in zero_pte_populate()
    114 unsigned long end) in zero_pmd_populate() argument
    120 next = pmd_addr_end(addr, end); in zero_pmd_populate()
    122 if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { in zero_pmd_populate()
    141 } while (pmd++, addr = next, addr != end); in zero_pmd_populate()
    147 unsigned long end) in zero_pud_populate() argument
    153 next = pud_addr_end(addr, end); in zero_pud_populate()
    154 if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
    178 } while (pud++, addr = next, addr != end); in zero_pud_populate()
    [all …]