/mm/
D | madvise.c |
      53  unsigned long start, unsigned long end, int behavior) in madvise_behavior() argument
      92  error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
     110  *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
     126  if (end != vma->vm_end) { in madvise_behavior()
     127  error = split_vma(mm, vma, end, 0); in madvise_behavior()
     146  unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument
     155  for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
     181  unsigned long start, unsigned long end) in force_swapin_readahead() argument
     189  walk_page_range(start, end, &walk); in force_swapin_readahead()
     195  unsigned long start, unsigned long end, in force_shm_swapin_readahead() argument
    [all …]

D | pagewalk.c |
       6  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in walk_pte_range() argument
      18  if (addr == end) in walk_pte_range()
      27  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
      37  next = pmd_addr_end(addr, end); in walk_pmd_range()
      67  } while (pmd++, addr = next, addr != end); in walk_pmd_range()
      72  static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, in walk_pud_range() argument
      81  next = pud_addr_end(addr, end); in walk_pud_range()
      93  } while (pud++, addr = next, addr != end); in walk_pud_range()
      98  static int walk_pgd_range(unsigned long addr, unsigned long end, in walk_pgd_range() argument
     107  next = pgd_addr_end(addr, end); in walk_pgd_range()
    [all …]

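Every pagewalk.c hit above is one instance of the same loop shape: advance through [addr, end) one page-table entry at a time, with p?d_addr_end() clamping each step to the next entry boundary or to end, whichever comes first. Below is a minimal user-space sketch of that pattern, assuming 2 MiB PMD entries; this pmd_addr_end() is a simplified stand-in for the kernel macro, not the real definition.

    #include <stdio.h>

    #define PMD_SHIFT 21                    /* assumed: 2 MiB PMD entries */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* Next PMD boundary after addr, clamped to end. Comparing the
     * decremented values keeps this correct even when end sits at the
     * very top of the address space and has wrapped to 0. */
    static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

            return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x1ff000, end = 0x600000, next;

            /* Same do/while shape as walk_pmd_range(): each chunk is
             * visited exactly once; the first and last may be partial. */
            do {
                    next = pmd_addr_end(addr, end);
                    printf("chunk [%#lx, %#lx)\n", addr, next);
            } while (addr = next, addr != end);
            return 0;
    }
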
D | mprotect.c |
      65  unsigned long addr, unsigned long end, pgprot_t newprot, in change_pte_range() argument
     132  } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
     140  pud_t *pud, unsigned long addr, unsigned long end, in change_pmd_range() argument
     154  next = pmd_addr_end(addr, end); in change_pmd_range()
     162  mmu_notifier_invalidate_range_start(mm, mni_start, end); in change_pmd_range()
     189  } while (pmd++, addr = next, addr != end); in change_pmd_range()
     192  mmu_notifier_invalidate_range_end(mm, mni_start, end); in change_pmd_range()
     200  pgd_t *pgd, unsigned long addr, unsigned long end, in change_pud_range() argument
     209  next = pud_addr_end(addr, end); in change_pud_range()
     214  } while (pud++, addr = next, addr != end); in change_pud_range()
    [all …]

D | bootmem.c |
      93  unsigned long mapstart, unsigned long start, unsigned long end) in init_bootmem_core() argument
      97  mminit_validate_memmodel_limits(&start, &end); in init_bootmem_core()
     100  bdata->node_low_pfn = end; in init_bootmem_core()
     107  mapsize = bootmap_bytes(end - start); in init_bootmem_core()
     111  bdata - bootmem_node_data, start, mapstart, end, mapsize); in init_bootmem_core()
     156  unsigned long cursor, end; in free_bootmem_late() local
     161  end = PFN_DOWN(physaddr + size); in free_bootmem_late()
     163  for (; cursor < end; cursor++) { in free_bootmem_late()
     172  unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
     179  end = bdata->node_low_pfn; in free_all_bootmem_core()
    [all …]

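free_bootmem_late() converts a byte range to page frame numbers before freeing; the hits show end = PFN_DOWN(physaddr + size). Pairing that with PFN_UP for the start (an assumption here, not visible in the hits) frees only pages lying wholly inside the range. A sketch assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* PFN_DOWN rounds an address down to a frame number, PFN_UP rounds
     * up; rounding the start up and the end down keeps partial pages
     * out of the freed set. */
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long physaddr = 0x1800, size = 0x3000;
            unsigned long cursor = PFN_UP(physaddr);
            unsigned long end = PFN_DOWN(physaddr + size);

            for (; cursor < end; cursor++)
                    printf("free pfn %lu\n", cursor);  /* pfns 2 and 3 */
            return 0;
    }
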
D | vmalloc.c |
      62  static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
      70  } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
      73  static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
      80  next = pmd_addr_end(addr, end); in vunmap_pmd_range()
      86  } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
      89  static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
      96  next = pud_addr_end(addr, end); in vunmap_pud_range()
     102  } while (pud++, addr = next, addr != end); in vunmap_pud_range()
     105  static void vunmap_page_range(unsigned long addr, unsigned long end) in vunmap_page_range() argument
     110  BUG_ON(addr >= end); in vunmap_page_range()
    [all …]

D | mincore.c |
      23  unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
      34  for (; addr != end; vec++, addr += PAGE_SIZE) in mincore_hugetlb()
      85  static int __mincore_unmapped_range(unsigned long addr, unsigned long end, in __mincore_unmapped_range() argument
      88  unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
     104  static int mincore_unmapped_range(unsigned long addr, unsigned long end, in mincore_unmapped_range() argument
     107  walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
     112  static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in mincore_pte_range() argument
     119  int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
     129  __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range()
     134  for (; addr != end; ptep++, addr += PAGE_SIZE) { in mincore_pte_range()
    [all …]

D | truncate.c |
     233  pgoff_t end; /* exclusive */ in truncate_inode_pages_range() local
     262  end = -1; in truncate_inode_pages_range()
     264  end = (lend + 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
     268  while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in truncate_inode_pages_range()
     269  min(end - index, (pgoff_t)PAGEVEC_SIZE), in truncate_inode_pages_range()
     276  if (index >= end) in truncate_inode_pages_range()
     304  if (start > end) { in truncate_inode_pages_range()
     320  struct page *page = find_lock_page(mapping, end); in truncate_inode_pages_range()
     336  if (start >= end) in truncate_inode_pages_range()
     343  min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) { in truncate_inode_pages_range()
    [all …]

D | mlock.c |
     370  unsigned long end) in __munlock_pagevec_fill() argument
     382  end = pgd_addr_end(start, end); in __munlock_pagevec_fill()
     383  end = pud_addr_end(start, end); in __munlock_pagevec_fill()
     384  end = pmd_addr_end(start, end); in __munlock_pagevec_fill()
     388  while (start < end) { in __munlock_pagevec_fill()
     439  unsigned long start, unsigned long end) in munlock_vma_pages_range() argument
     443  while (start < end) { in munlock_vma_pages_range()
     493  zoneid, start, end); in munlock_vma_pages_range()
     515  unsigned long start, unsigned long end, vm_flags_t newflags) in mlock_fixup() argument
     530  *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, in mlock_fixup()
    [all …]

D | memblock.c |
     121  __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
     129  this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
     130  this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
     155  __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
     164  this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
     165  this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
     202  phys_addr_t end, int nid, ulong flags) in memblock_find_in_range_node() argument
     207  if (end == MEMBLOCK_ALLOC_ACCESSIBLE) in memblock_find_in_range_node()
     208  end = memblock.current_limit; in memblock_find_in_range_node()
     212  end = max(start, end); in memblock_find_in_range_node()
    [all …]

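Both __memblock_find_range_bottom_up() and __memblock_find_range_top_down() intersect each candidate memory region with the caller's [start, end) window via the two clamp() calls shown above; an empty intersection falls out naturally as start >= end. A small self-contained sketch of that test (values are illustrative; clamp() is reimplemented here rather than the kernel macro):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    /* Same idea as the kernel's clamp(): confine val to [lo, hi]. */
    static phys_addr_t clamp(phys_addr_t val, phys_addr_t lo, phys_addr_t hi)
    {
            return val < lo ? lo : (val > hi ? hi : val);
    }

    int main(void)
    {
            /* One free region as the allocator might report it. */
            phys_addr_t this_start = 0x1000, this_end = 0x9000;
            /* The caller's allowed window. */
            phys_addr_t start = 0x4000, end = 0x6000;

            /* The two clamp() calls from the hits above: intersect the
             * region with the window. */
            this_start = clamp(this_start, start, end);
            this_end = clamp(this_end, start, end);

            if (this_start < this_end)
                    printf("usable: [%#llx, %#llx)\n",
                           (unsigned long long)this_start,
                           (unsigned long long)this_end);
            else
                    printf("no overlap\n");
            return 0;
    }
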
D | memory.c |
     220  …lb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
     225  tlb->fullmm = !(start | (end+1)); in tlb_gather_mmu()
     243  if (!tlb->end) in tlb_flush_mmu_tlbonly()
     247  mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
     275  void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) in tlb_finish_mmu() argument
     302  VM_BUG_ON(!tlb->end); in __tlb_remove_page_size()
     414  unsigned long addr, unsigned long end, in free_pmd_range() argument
     424  next = pmd_addr_end(addr, end); in free_pmd_range()
     428  } while (pmd++, addr = next, addr != end); in free_pmd_range()
     438  if (end - 1 > ceiling - 1) in free_pmd_range()
    [all …]

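The tlb->fullmm = !(start | (end+1)) hit relies on the convention that a full-address-space teardown is requested as start = 0, end = ~0UL; end + 1 then wraps to zero, so the expression yields 1 for exactly that pair and 0 for any ranged flush. A standalone sketch of the test:

    #include <stdio.h>

    /* !(start | (end + 1)) is 1 iff start == 0 and end == ~0UL: the
     * unsigned wrap of end + 1 to 0 makes both operands of | zero only
     * for the "whole mm" sentinel pair. */
    static int is_fullmm(unsigned long start, unsigned long end)
    {
            return !(start | (end + 1));
    }

    int main(void)
    {
            printf("%d\n", is_fullmm(0, ~0UL));        /* 1: full flush   */
            printf("%d\n", is_fullmm(0x1000, 0x2000)); /* 0: ranged flush */
            return 0;
    }
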
D | nobootmem.c |
      85  unsigned long cursor, end; in free_bootmem_late() local
      90  end = PFN_DOWN(addr + size); in free_bootmem_late()
      92  for (; cursor < end; cursor++) { in free_bootmem_late()
      98  static void __init __free_pages_memory(unsigned long start, unsigned long end) in __free_pages_memory() argument
     102  while (start < end) { in __free_pages_memory()
     105  while (start + (1UL << order) > end) in __free_pages_memory()
     115  phys_addr_t end) in __free_memory_core() argument
     119  PFN_DOWN(end), max_low_pfn); in __free_memory_core()
     132  phys_addr_t start, end; in free_low_memory_core_early() local
     137  for_each_reserved_mem_region(i, &start, &end) in free_low_memory_core_early()
    [all …]

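__free_pages_memory() hands [start, end) back in the largest naturally aligned power-of-two chunks, picking each order from the alignment of start and shrinking it while start + (1UL << order) would overshoot end. A user-space sketch of that chunking, with MAX_ORDER_LOG as an assumed cap and lowest_set_bit() standing in for the kernel's __ffs():

    #include <stdio.h>

    #define MAX_ORDER_LOG 10                /* assumed order cap */

    static int lowest_set_bit(unsigned long x)  /* like __ffs() */
    {
            int n = 0;

            while (!(x & 1)) {
                    x >>= 1;
                    n++;
            }
            return n;
    }

    int main(void)
    {
            unsigned long start = 3, end = 20;  /* pfns, illustrative */

            while (start < end) {
                    /* Largest order start is aligned to (start == 0 is
                     * aligned to everything, so use the cap). */
                    int order = start ? lowest_set_bit(start) : MAX_ORDER_LOG;

                    if (order > MAX_ORDER_LOG)
                            order = MAX_ORDER_LOG;
                    while (start + (1UL << order) > end)
                            order--;        /* shrink to fit the tail */
                    printf("free block [%lu, %lu) order %d\n",
                           start, start + (1UL << order), order);
                    start += 1UL << order;
            }
            return 0;
    }
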
D | memtest.c |
      36  u64 *p, *start, *end; in memtest() local
      43  end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
      47  for (p = start; p < end; p++) in memtest()
      50  for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
      65  static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
      72  this_start = clamp(this_start, start, end); in do_one_pass()
      73  this_end = clamp(this_end, start, end); in do_one_pass()
      99  void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
     110  do_one_pass(patterns[idx], start, end); in early_memtest()

D | msync.c |
      33  unsigned long end; in SYSCALL_DEFINE3() local
      47  end = start + len; in SYSCALL_DEFINE3()
      48  if (end < start) in SYSCALL_DEFINE3()
      51  if (end == start) in SYSCALL_DEFINE3()
      70  if (start >= end) in SYSCALL_DEFINE3()
      83  fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
      91  if (error || start >= end) in SYSCALL_DEFINE3()
      96  if (start >= end) { in SYSCALL_DEFINE3()

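The end = start + len; if (end < start) pair in the SYSCALL_DEFINE3() hits is the standard unsigned-wraparound check: the sum can only come out below start if it overflowed past the top of the address space. A minimal sketch:

    #include <stdio.h>
    #include <limits.h>

    /* Validate [start, start + len) the way the msync() entry does.
     * Returns 0 on success, -1 if the range wraps (EINVAL-style). */
    static int check_range(unsigned long start, unsigned long len,
                           unsigned long *end)
    {
            *end = start + len;
            if (*end < start)
                    return -1;      /* wrapped: start + len overflowed */
            return 0;
    }

    int main(void)
    {
            unsigned long end;

            printf("%d\n", check_range(0x1000, 0x2000, &end));      /* 0  */
            printf("%d\n", check_range(ULONG_MAX - 10, 100, &end)); /* -1 */
            return 0;
    }
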
D | gup.c |
    1069  unsigned long start, unsigned long end, int *nonblocking) in populate_vma_page_range() argument
    1072  unsigned long nr_pages = (end - start) / PAGE_SIZE; in populate_vma_page_range()
    1076  VM_BUG_ON(end & ~PAGE_MASK); in populate_vma_page_range()
    1078  VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
    1117  unsigned long end, nstart, nend; in __mm_populate() local
    1124  end = start + len; in __mm_populate()
    1126  for (nstart = start; nstart < end; nstart = nend) { in __mm_populate()
    1137  if (!vma || vma->vm_start >= end) in __mm_populate()
    1143  nend = min(end, vma->vm_end); in __mm_populate()
    1234  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, in gup_pte_range() argument
    [all …]

D | mempolicy.c |
     483  unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument
     519  for (; addr != end; pte++, addr += PAGE_SIZE) { in queue_pages_pte_range()
     558  unsigned long addr, unsigned long end, in queue_pages_hugetlb() argument
     600  unsigned long addr, unsigned long end) in change_prot_numa() argument
     604  nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
     612  unsigned long addr, unsigned long end) in change_prot_numa() argument
     618  static int queue_pages_test_walk(unsigned long start, unsigned long end, in queue_pages_test_walk() argument
     629  if (endvma > end) in queue_pages_test_walk()
     630  endvma = end; in queue_pages_test_walk()
     635  if (!vma->vm_next && vma->vm_end < end) in queue_pages_test_walk()
    [all …]

D | mmu_notifier.c |
     111  unsigned long end) in __mmu_notifier_clear_flush_young() argument
     119  young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
     128  unsigned long end) in __mmu_notifier_clear_young() argument
     136  young |= mn->ops->clear_young(mn, mm, start, end); in __mmu_notifier_clear_young()
     191  unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range_start() argument
     199  mn->ops->invalidate_range_start(mn, mm, start, end); in __mmu_notifier_invalidate_range_start()
     206  unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range_end() argument
     222  mn->ops->invalidate_range(mn, mm, start, end); in __mmu_notifier_invalidate_range_end()
     224  mn->ops->invalidate_range_end(mn, mm, start, end); in __mmu_notifier_invalidate_range_end()
     231  unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range() argument
    [all …]

D | page_poison.c |
     113  unsigned char *end; in check_poison_mem() local
     122  for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
     123  if (*end != PAGE_POISON) in check_poison_mem()
     129  else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
     135  end - start + 1, 1); in check_poison_mem()

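check_poison_mem() scans forward for the first non-poison byte and, per the backward loop shown above, from the end for the last one, so the report covers the whole corrupted span rather than just its first byte. A sketch assuming a 0xaa poison value (the kernel's actual byte is configuration-dependent):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_POISON 0xaa  /* assumed; config-dependent in the kernel */

    /* Same scan shape as the check_poison_mem() hits: first non-poison
     * byte from the front, last non-poison byte from the back. */
    static void report_corruption(unsigned char *mem, size_t bytes)
    {
            unsigned char *start = NULL, *end, *p;

            for (p = mem; p < mem + bytes; p++)
                    if (*p != PAGE_POISON) {
                            start = p;
                            break;
                    }
            if (!start)
                    return;         /* fully poisoned: page is clean */

            for (end = mem + bytes - 1; end > start; end--)
                    if (*end != PAGE_POISON)
                            break;

            printf("corruption at offset %td, %td byte(s)\n",
                   start - mem, end - start + 1);
    }

    int main(void)
    {
            unsigned char page[64];

            for (size_t i = 0; i < sizeof(page); i++)
                    page[i] = PAGE_POISON;
            page[10] = 0x00;
            page[13] = 0x42;        /* simulated corruption */
            report_corruption(page, sizeof(page));
            return 0;
    }
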
D | usercopy.c |
     142  const void *end = ptr + n - 1; in check_page_span() local
     154  end <= (const void *)__end_rodata) { in check_page_span()
     161  if (ptr >= (const void *)_sdata && end <= (const void *)_edata) in check_page_span()
     166  end <= (const void *)__bss_stop) in check_page_span()
     171  ((unsigned long)end & (unsigned long)PAGE_MASK))) in check_page_span()
     175  endpage = virt_to_head_page(end); in check_page_span()
     189  for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { in check_page_span()

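The check_page_span() hits compare the page bits of the buffer's first and last byte; note that end = ptr + n - 1 is inclusive, which is what makes the mask comparison exact. A sketch of that span test, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL                /* assumed page size */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* A buffer stays within one page iff its first and last bytes share
     * the same page frame bits. */
    static int spans_pages(const void *ptr, unsigned long n)
    {
            const void *end = (const char *)ptr + n - 1;  /* inclusive */

            return ((unsigned long)ptr & PAGE_MASK) !=
                   ((unsigned long)end & PAGE_MASK);
    }

    int main(void)
    {
            printf("%d\n", spans_pages((void *)0x1000, 4096)); /* 0: one page  */
            printf("%d\n", spans_pages((void *)0x1800, 4096)); /* 1: two pages */
            return 0;
    }
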
D | mmap.c |
      75  unsigned long start, unsigned long end);
     488  unsigned long end, struct vm_area_struct **pprev, in find_vma_links() argument
     504  if (vma_tmp->vm_start < end) in find_vma_links()
     522  unsigned long addr, unsigned long end) in count_vma_pages_range() argument
     528  vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
     532  nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
     539  if (vma->vm_start > end) in count_vma_pages_range()
     542  overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
     679  unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, in __vma_adjust() argument
     695  if (end >= next->vm_end) { in __vma_adjust()
    [all …]

D | page_ext.c |
     310  unsigned long start, end, pfn; in online_page_ext() local
     314  end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
     326  for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { in online_page_ext()
     335  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
     344  unsigned long start, end, pfn; in offline_page_ext() local
     347  end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
     349  for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()

D | sparse-vmemmap.c |
     163  unsigned long start, unsigned long end) in vmemmap_verify() argument
     170  start, end - 1); in vmemmap_verify()
     224  unsigned long end, int node) in vmemmap_populate_basepages() argument
     232  for (; addr < end; addr += PAGE_SIZE) { in vmemmap_populate_basepages()
     254  unsigned long end; in sparse_mem_map_populate() local
     259  end = (unsigned long)(map + PAGES_PER_SECTION); in sparse_mem_map_populate()
     261  if (vmemmap_populate(start, end, nid)) in sparse_mem_map_populate()

D | percpu.c |
     256  int *rs, int *re, int end) in pcpu_next_unpop() argument
     258  *rs = find_next_zero_bit(chunk->populated, end, *rs); in pcpu_next_unpop()
     259  *re = find_next_bit(chunk->populated, end, *rs + 1); in pcpu_next_unpop()
     263  int *rs, int *re, int end) in pcpu_next_pop() argument
     265  *rs = find_next_bit(chunk->populated, end, *rs); in pcpu_next_pop()
     266  *re = find_next_zero_bit(chunk->populated, end, *rs + 1); in pcpu_next_pop()
     275  #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ argument
     276  for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
     278  (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
     280  #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ argument
    [all …]

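pcpu_next_pop() pairs find_next_bit() with find_next_zero_bit() to delimit one populated run, and the pcpu_for_each_*_region() macros chain those calls into a loop over all runs. A sketch of the same pairing over a toy one-word bitmap; the find_* helpers below are simplified stand-ins for the kernel's bitmap primitives:

    #include <stdio.h>

    static int find_next_bit(unsigned long map, int size, int off)
    {
            for (; off < size; off++)
                    if (map & (1UL << off))
                            return off;
            return size;
    }

    static int find_next_zero_bit(unsigned long map, int size, int off)
    {
            for (; off < size; off++)
                    if (!(map & (1UL << off)))
                            return off;
            return size;
    }

    int main(void)
    {
            unsigned long populated = 0xE6UL;  /* bits 1,2 and 5-7 set */
            int size = 10, rs, re = 0;

            /* Same pairing as pcpu_next_pop(): a populated region starts
             * at the next set bit and ends just before the next clear
             * bit after it. */
            for (rs = find_next_bit(populated, size, 0); rs < size;
                 rs = find_next_bit(populated, size, re)) {
                    re = find_next_zero_bit(populated, size, rs + 1);
                    printf("populated region [%d, %d)\n", rs, re);
            }
            return 0;
    }
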
D | cma_debug.c |
      54  unsigned long start, end = 0; in cma_maxchunk_get() local
      59  start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); in cma_maxchunk_get()
      62  end = find_next_bit(cma->bitmap, bitmap_maxno, start); in cma_maxchunk_get()
      63  maxchunk = max(end - start, maxchunk); in cma_maxchunk_get()

D | page_counter.c |
     178  char *end; in page_counter_memparse() local
     186  bytes = memparse(buf, &end); in page_counter_memparse()
     187  if (*end != '\0') in page_counter_memparse()

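page_counter_memparse() rejects input with trailing characters by checking *end after memparse() has consumed the number and its size suffix. A simplified user-space memparse() showing that end-pointer protocol; the real kernel helper also accepts lowercase and larger suffixes, which this sketch omits:

    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a size with an optional K/M/G suffix, leaving *end at the
     * first unconsumed character so callers can reject trailing junk. */
    static unsigned long long memparse(const char *buf, char **end)
    {
            unsigned long long bytes = strtoull(buf, end, 0);

            switch (**end) {
            case 'G': bytes <<= 10;         /* fall through */
            case 'M': bytes <<= 10;         /* fall through */
            case 'K': bytes <<= 10;
                      (*end)++;
                      break;
            }
            return bytes;
    }

    int main(void)
    {
            char *end;
            unsigned long long bytes = memparse("64M", &end);

            if (*end != '\0')               /* the check from the hit above */
                    fprintf(stderr, "trailing garbage: %s\n", end);
            else
                    printf("%llu bytes\n", bytes);
            return 0;
    }
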
/mm/kasan/ |
D | kasan_init.c |
      47  unsigned long end) in zero_pte_populate() argument
      55  while (addr + PAGE_SIZE <= end) { in zero_pte_populate()
      63  unsigned long end) in zero_pmd_populate() argument
      69  next = pmd_addr_end(addr, end); in zero_pmd_populate()
      71  if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { in zero_pmd_populate()
      81  } while (pmd++, addr = next, addr != end); in zero_pmd_populate()
      85  unsigned long end) in zero_pud_populate() argument
      91  next = pud_addr_end(addr, end); in zero_pud_populate()
      92  if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
     106  } while (pud++, addr = next, addr != end); in zero_pud_populate()
    [all …]