Searched refs:end (Results 1 – 25 of 59) sorted by relevance

/mm/
pagewalk.c
7 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in walk_pte_range() argument
19 if (addr >= end - PAGE_SIZE) in walk_pte_range()
29 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, in walk_pmd_range() argument
40 next = pmd_addr_end(addr, end); in walk_pmd_range()
70 } while (pmd++, addr = next, addr != end); in walk_pmd_range()
75 static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, in walk_pud_range() argument
86 next = pud_addr_end(addr, end); in walk_pud_range()
115 } while (pud++, addr = next, addr != end); in walk_pud_range()
120 static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, in walk_p4d_range() argument
130 next = p4d_addr_end(addr, end); in walk_p4d_range()
[all …]
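The pagewalk.c hits above (and the mprotect.c, vmalloc.c, memory.c and kasan/init.c entries further down) all share one idiom: compute the next table boundary with a *_addr_end() helper, clamp it to the caller's end, and loop until addr == end. A minimal standalone sketch of that idiom, assuming an illustrative 2 MiB PMD span rather than any real architecture's constants:

/* Standalone sketch (not kernel code) of the *_addr_end() idiom: step to
 * the next table boundary, but never past the caller's end.  The 2 MiB
 * PMD span below is illustrative only. */
#include <stdio.h>

#define PMD_SIZE (1UL << 21)
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long pmd_addr_end_sketch(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        /* the -1 bias keeps this correct even for a range ending at 0 */
        return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x00201000UL;      /* starts mid-PMD */
        unsigned long end  = 0x00702000UL;      /* ends mid-PMD   */
        unsigned long next;

        do {
                next = pmd_addr_end_sketch(addr, end);
                printf("visit [%#lx, %#lx)\n", addr, next);
                addr = next;
        } while (addr != end);
        return 0;
}

Because the helper never returns a value past end, terminating the loop on addr != end is safe.
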
madvise.c
67 unsigned long start, unsigned long end, int behavior) in madvise_behavior() argument
117 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
135 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
155 if (end != vma->vm_end) { in madvise_behavior()
160 error = __split_vma(mm, vma, end, 0); in madvise_behavior()
184 unsigned long end, struct mm_walk *walk) in swapin_walk_pmd_entry() argument
193 for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
223 unsigned long start, unsigned long end, in force_shm_swapin_readahead() argument
230 for (; start < end; start += PAGE_SIZE) { in force_shm_swapin_readahead()
255 unsigned long start, unsigned long end) in madvise_willneed() argument
[all …]
mprotect.c
39 unsigned long addr, unsigned long end, pgprot_t newprot, in change_pte_range() argument
157 } while (pte++, addr += PAGE_SIZE, addr != end); in change_pte_range()
190 pud_t *pud, unsigned long addr, unsigned long end, in change_pmd_range() argument
205 next = pmd_addr_end(addr, end); in change_pmd_range()
223 vma, vma->vm_mm, addr, end); in change_pmd_range()
251 } while (pmd++, addr = next, addr != end); in change_pmd_range()
262 p4d_t *p4d, unsigned long addr, unsigned long end, in change_pud_range() argument
271 next = pud_addr_end(addr, end); in change_pud_range()
276 } while (pud++, addr = next, addr != end); in change_pud_range()
282 pgd_t *pgd, unsigned long addr, unsigned long end, in change_p4d_range() argument
[all …]
memblock.c
192 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
200 this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
201 this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
227 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
236 this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
237 this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
267 phys_addr_t end, int nid, in memblock_find_in_range_node() argument
271 if (end == MEMBLOCK_ALLOC_ACCESSIBLE || in memblock_find_in_range_node()
272 end == MEMBLOCK_ALLOC_KASAN) in memblock_find_in_range_node()
273 end = memblock.current_limit; in memblock_find_in_range_node()
[all …]
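The memblock.c hits show a different use of end: each candidate memory region is intersected with the requested [start, end) window via clamp() before checking whether an allocation fits. A simplified userspace sketch with invented region and window values (the real search also honours alignment and NUMA node constraints):

/* Userspace sketch of the clamp()-based intersection in the memblock.c
 * hits: clip a free region to the allocation window [start, end) and see
 * whether "size" bytes fit.  All values below are invented. */
#include <stdio.h>

typedef unsigned long long phys_addr_t;

static phys_addr_t clamp(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
        phys_addr_t this_start = 0x1000, this_end = 0x9000;  /* free region */
        phys_addr_t start = 0x4000, end = 0x6000;            /* window      */
        phys_addr_t size = 0x1000;

        this_start = clamp(this_start, start, end);
        this_end   = clamp(this_end, start, end);

        if (this_end >= this_start + size)
                printf("bottom-up candidate at %#llx\n", this_start);
        return 0;
}
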
hmm.c
109 if (nrange->end < range->start || nrange->start >= range->end) in hmm_invalidate_range_start()
256 unsigned long end, in hmm_pfns_bad() argument
265 for (; addr < end; addr += PAGE_SIZE, i++) in hmm_pfns_bad()
283 static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end, in hmm_vma_walk_hole_() argument
298 for (; addr < end; addr += PAGE_SIZE, i++) { in hmm_vma_walk_hole_()
378 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, in hmm_vma_walk_hole() argument
388 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_walk_hole()
392 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); in hmm_vma_walk_hole()
406 unsigned long end, uint64_t *pfns, pmd_t pmd) in hmm_vma_handle_pmd() argument
414 npages = (end - addr) >> PAGE_SHIFT; in hmm_vma_handle_pmd()
[all …]
truncate.c
61 pgoff_t end) in truncate_exceptional_pvec_entries() argument
78 lock = !dax && indices[j] < end; in truncate_exceptional_pvec_entries()
91 if (index >= end) in truncate_exceptional_pvec_entries()
292 pgoff_t end; /* exclusive */ in truncate_inode_pages_range() local
320 end = -1; in truncate_inode_pages_range()
322 end = (lend + 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
326 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in truncate_inode_pages_range()
327 min(end - index, (pgoff_t)PAGEVEC_SIZE), in truncate_inode_pages_range()
342 if (index >= end) in truncate_inode_pages_range()
366 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); in truncate_inode_pages_range()
[all …]
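In the truncate.c entry, end is an exclusive page index derived from an inclusive byte offset: end = (lend + 1) >> PAGE_SHIFT, with lend == -1 meaning truncate to end of file and hence end == (pgoff_t)-1. A tiny sketch of that conversion, assuming a 4 KiB page (PAGE_SHIFT 12):

/* Sketch of the truncate.c index arithmetic: convert an inclusive byte
 * offset lend into an exclusive page index, treating lend == -1 as
 * "no upper bound".  PAGE_SHIFT of 12 (4 KiB pages) is assumed. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        long long lend = 0x4fff;        /* last byte to truncate, inclusive */
        unsigned long end;

        if (lend == -1)
                end = (unsigned long)-1;                    /* to end of file */
        else
                end = (unsigned long)(lend + 1) >> PAGE_SHIFT;

        printf("exclusive end index: %lu\n", end);          /* prints 5 */
        return 0;
}
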
mincore.c
25 unsigned long end, struct mm_walk *walk) in mincore_hugetlb() argument
36 for (; addr != end; vec++, addr += PAGE_SIZE) in mincore_hugetlb()
95 static int __mincore_unmapped_range(unsigned long addr, unsigned long end, in __mincore_unmapped_range() argument
98 unsigned long nr = (end - addr) >> PAGE_SHIFT; in __mincore_unmapped_range()
114 static int mincore_unmapped_range(unsigned long addr, unsigned long end, in mincore_unmapped_range() argument
117 walk->private += __mincore_unmapped_range(addr, end, in mincore_unmapped_range()
122 static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in mincore_pte_range() argument
129 int nr = (end - addr) >> PAGE_SHIFT; in mincore_pte_range()
139 __mincore_unmapped_range(addr, end, vma, vec); in mincore_pte_range()
144 for (; addr != end; ptep++, addr += PAGE_SIZE) { in mincore_pte_range()
[all …]
gup.c
1215 unsigned long start, unsigned long end, int *nonblocking) in populate_vma_page_range() argument
1218 unsigned long nr_pages = (end - start) / PAGE_SIZE; in populate_vma_page_range()
1222 VM_BUG_ON(end & ~PAGE_MASK); in populate_vma_page_range()
1224 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
1263 unsigned long end, nstart, nend; in __mm_populate() local
1268 end = start + len; in __mm_populate()
1270 for (nstart = start; nstart < end; nstart = nend) { in __mm_populate()
1281 if (!vma || vma->vm_start >= end) in __mm_populate()
1287 nend = min(end, vma->vm_end); in __mm_populate()
1837 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, in gup_pte_range() argument
[all …]
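The gup.c hits around __mm_populate() show a range being carved into per-VMA chunks: find the VMA covering nstart, clamp the chunk to min(end, vma->vm_end), fault it in, and continue from nend. A standalone sketch with an invented two-VMA layout standing in for find_vma():

/* Standalone sketch of the __mm_populate() chunking seen above: walk the
 * request [start, end) one VMA at a time, clamping each chunk to
 * min(end, vma->vm_end).  The two-entry vma array replaces find_vma()
 * and is entirely invented. */
#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; };

int main(void)
{
        struct vma vmas[] = { { 0x1000, 0x4000 }, { 0x6000, 0x9000 } };
        unsigned long start = 0x2000, end = 0x8000;
        unsigned long nstart, nend;

        for (nstart = start; nstart < end; nstart = nend) {
                struct vma *vma = NULL;

                for (unsigned int i = 0; i < 2; i++)    /* find_vma() stand-in */
                        if (vmas[i].vm_end > nstart) {
                                vma = &vmas[i];
                                break;
                        }
                if (!vma || vma->vm_start >= end)
                        break;
                if (nstart < vma->vm_start)             /* skip the hole */
                        nstart = vma->vm_start;
                nend = end < vma->vm_end ? end : vma->vm_end;
                printf("populate [%#lx, %#lx)\n", nstart, nend);
        }
        return 0;
}
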
mlock.c
376 unsigned long start, unsigned long end) in __munlock_pagevec_fill() argument
388 end = pgd_addr_end(start, end); in __munlock_pagevec_fill()
389 end = p4d_addr_end(start, end); in __munlock_pagevec_fill()
390 end = pud_addr_end(start, end); in __munlock_pagevec_fill()
391 end = pmd_addr_end(start, end); in __munlock_pagevec_fill()
395 while (start < end) { in __munlock_pagevec_fill()
446 unsigned long start, unsigned long end) in munlock_vma_pages_range() argument
450 while (start < end) { in munlock_vma_pages_range()
498 zone, start, end); in munlock_vma_pages_range()
520 unsigned long start, unsigned long end, vm_flags_t newflags) in mlock_fixup() argument
[all …]
vmalloc.c
64 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) in vunmap_pte_range() argument
72 } while (pte++, addr += PAGE_SIZE, addr != end); in vunmap_pte_range()
75 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) in vunmap_pmd_range() argument
82 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
90 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
93 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) in vunmap_pud_range() argument
100 next = pud_addr_end(addr, end); in vunmap_pud_range()
106 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
109 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) in vunmap_p4d_range() argument
116 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
[all …]
memtest.c
37 u64 *p, *start, *end; in memtest() local
44 end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
48 for (p = start; p < end; p++) in memtest()
51 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
66 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
73 this_start = clamp(this_start, start, end); in do_one_pass()
74 this_end = clamp(this_end, start, end); in do_one_pass()
100 void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
111 do_one_pass(patterns[idx], start, end); in early_memtest()
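The memtest.c entry is the plainest start/end pair in the list: one pass fills the word range with a pattern, a second pass re-reads it and flags mismatches. A userspace sketch over an invented buffer (the kernel version additionally reserves any failing range so the page allocator never hands it out):

/* Userspace sketch of the memtest.c write/verify pass: fill the word
 * range [start, end) with a pattern, then re-read and report mismatches.
 * The buffer and pattern are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        static uint64_t buf[1024];
        uint64_t pattern = 0xaaaaaaaaaaaaaaaaULL;
        uint64_t *start = buf, *end = buf + 1024, *p;

        for (p = start; p < end; p++)
                *p = pattern;

        for (p = start; p < end; p++)
                if (*p != pattern)
                        printf("bad word at offset %zu\n", (size_t)(p - start));
        return 0;
}
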
memory.c
236 unsigned long addr, unsigned long end, in free_pmd_range() argument
246 next = pmd_addr_end(addr, end); in free_pmd_range()
250 } while (pmd++, addr = next, addr != end); in free_pmd_range()
260 if (end - 1 > ceiling - 1) in free_pmd_range()
270 unsigned long addr, unsigned long end, in free_pud_range() argument
280 next = pud_addr_end(addr, end); in free_pud_range()
284 } while (pud++, addr = next, addr != end); in free_pud_range()
294 if (end - 1 > ceiling - 1) in free_pud_range()
304 unsigned long addr, unsigned long end, in free_p4d_range() argument
314 next = p4d_addr_end(addr, end); in free_p4d_range()
[all …]
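The memory.c hits repeat the *_addr_end() walk, but the end - 1 > ceiling - 1 comparison on line 260 deserves a note: a ceiling of 0 means no upper limit, and biasing both sides by one lets it wrap to ULONG_MAX so the comparison never trips. A sketch of just that comparison; the helper name is invented:

/* Sketch of the end/ceiling comparison in the free_*_range() hits.  A
 * ceiling of 0 means "top of the address space"; subtracting 1 from both
 * sides makes it wrap to ULONG_MAX, so it never limits the range. */
#include <stdio.h>

static int ends_at_or_below(unsigned long end, unsigned long ceiling)
{
        return end - 1 <= ceiling - 1;  /* ceiling == 0 wraps to ULONG_MAX */
}

int main(void)
{
        printf("%d\n", ends_at_or_below(0x8000, 0));        /* 1: no ceiling  */
        printf("%d\n", ends_at_or_below(0x8000, 0x4000));   /* 0: above limit */
        return 0;
}
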
msync.c
34 unsigned long end; in SYSCALL_DEFINE3() local
50 end = start + len; in SYSCALL_DEFINE3()
51 if (end < start) in SYSCALL_DEFINE3()
54 if (end == start) in SYSCALL_DEFINE3()
73 if (start >= end) in SYSCALL_DEFINE3()
86 fend = fstart + (min(end, vma->vm_end) - start) - 1; in SYSCALL_DEFINE3()
94 if (error || start >= end) in SYSCALL_DEFINE3()
99 if (start >= end) { in SYSCALL_DEFINE3()
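The msync.c entry shows typical syscall-entry validation of a (start, len) pair: end = start + len can wrap around the address space, the wrap is caught simply as end < start, and end == start is a legal empty range. A tiny sketch with a start address near the top of the space:

/* Sketch of the msync() range validation: compute end = start + len and
 * treat a wrapped sum (end < start) as an invalid range.  The start value
 * is invented to force the wrap. */
#include <stdio.h>

int main(void)
{
        unsigned long start = ~0UL - 0x0fff;
        unsigned long len = 0x4000;
        unsigned long end = start + len;

        if (end < start)
                printf("range wraps: rejected (the syscall returns -ENOMEM)\n");
        else if (end == start)
                printf("empty range: nothing to do\n");
        else
                printf("sync [%#lx, %#lx)\n", start, end);
        return 0;
}
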
mempolicy.c
442 unsigned long end, struct mm_walk *walk) in queue_pages_pmd() argument
491 unsigned long end, struct mm_walk *walk) in queue_pages_pte_range() argument
504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); in queue_pages_pte_range()
514 for (; addr != end; pte++, addr += PAGE_SIZE) { in queue_pages_pte_range()
551 return addr != end ? -EIO : 0; in queue_pages_pte_range()
555 unsigned long addr, unsigned long end, in queue_pages_hugetlb() argument
596 unsigned long addr, unsigned long end) in change_prot_numa() argument
600 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
608 unsigned long addr, unsigned long end) in change_prot_numa() argument
614 static int queue_pages_test_walk(unsigned long start, unsigned long end, in queue_pages_test_walk() argument
[all …]
page_poison.c
70 unsigned char *end; in check_poison_mem() local
79 for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
80 if (*end != PAGE_POISON) in check_poison_mem()
86 else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
92 end - start + 1, 1); in check_poison_mem()
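The page_poison.c hits bracket the damaged bytes of a poisoned page by scanning in from both ends, then distinguish a single flipped bit from broader corruption. A userspace sketch with an injected one-bit error; the kernel finds the first bad byte with memchr_inv() rather than the open-coded scan used here:

/* Userspace sketch of the check_poison_mem() classification: find the
 * first and last bytes that no longer hold the poison value, then report
 * a lone bit flip differently from wider corruption.  0xaa matches the
 * kernel's PAGE_POISON default. */
#include <stdio.h>

#define PAGE_POISON 0xaa

static int single_bit_flip(unsigned char a, unsigned char b)
{
        unsigned char diff = a ^ b;

        return diff && !(diff & (diff - 1));    /* exactly one bit differs */
}

int main(void)
{
        unsigned char mem[16];
        unsigned char *start = mem, *end;
        int i;

        for (i = 0; i < 16; i++)
                mem[i] = PAGE_POISON;
        mem[3] ^= 0x08;                         /* inject a one-bit error */

        while (*start == PAGE_POISON)           /* first bad byte */
                start++;
        for (end = mem + 16 - 1; end > start; end--)
                if (*end != PAGE_POISON)        /* last bad byte */
                        break;

        if (start == end && single_bit_flip(*start, PAGE_POISON))
                printf("single bit flip at offset %td\n", start - mem);
        else
                printf("memory corruption, %td byte(s)\n", end - start + 1);
        return 0;
}
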
sparse-vmemmap.c
133 unsigned long start, unsigned long end) in vmemmap_verify() argument
140 start, end - 1); in vmemmap_verify()
217 unsigned long end, int node) in vmemmap_populate_basepages() argument
226 for (; addr < end; addr += PAGE_SIZE) { in vmemmap_populate_basepages()
252 unsigned long end; in __populate_section_memmap() local
259 end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION); in __populate_section_memmap()
261 nr_pages = end - pfn; in __populate_section_memmap()
264 end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()
266 if (vmemmap_populate(start, end, nid, altmap)) in __populate_section_memmap()
percpu.c
273 static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end) in pcpu_next_unpop() argument
275 *rs = find_next_zero_bit(bitmap, end, *rs); in pcpu_next_unpop()
276 *re = find_next_bit(bitmap, end, *rs + 1); in pcpu_next_unpop()
279 static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end) in pcpu_next_pop() argument
281 *rs = find_next_bit(bitmap, end, *rs); in pcpu_next_pop()
282 *re = find_next_zero_bit(bitmap, end, *rs + 1); in pcpu_next_pop()
290 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ argument
291 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
293 (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
295 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ argument
[all …]
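The percpu.c hits iterate over runs in a population bitmap: pcpu_next_unpop() places rs on the next clear bit and re on the following set bit, so each step yields one unpopulated region [rs, re). A userspace sketch with an invented 8-bit map and a plain-loop stand-in for find_next_bit()/find_next_zero_bit():

/* Userspace sketch of the pcpu_for_each_unpop_region() pattern: rs lands
 * on the next clear bit, re on the following set bit, and each iteration
 * reports one unpopulated region [rs, re). */
#include <stdio.h>

static int next_bit(unsigned long map, int end, int from, int want_set)
{
        int i;

        for (i = from; i < end; i++)
                if (((map >> i) & 1UL) == (unsigned long)want_set)
                        return i;
        return end;                     /* "end" means not found */
}

int main(void)
{
        unsigned long populated = 0xe3; /* bits 0,1,5,6,7 set; 2-4 clear */
        int end = 8, rs, re = 0;

        for (rs = next_bit(populated, end, 0, 0); rs < end;
             rs = next_bit(populated, end, re + 1, 0)) {
                re = next_bit(populated, end, rs + 1, 1);
                printf("unpopulated region [%d, %d)\n", rs, re);
        }
        return 0;
}
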
usercopy.c
164 const void *end = ptr + n - 1; in check_page_span() local
176 end <= (const void *)__end_rodata) { in check_page_span()
183 if (ptr >= (const void *)_sdata && end <= (const void *)_edata) in check_page_span()
188 end <= (const void *)__bss_stop) in check_page_span()
193 ((unsigned long)end & (unsigned long)PAGE_MASK))) in check_page_span()
197 endpage = virt_to_head_page(end); in check_page_span()
211 for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { in check_page_span()
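In the usercopy.c entry, end = ptr + n - 1 is the inclusive last byte of the object, and the check on line 193 asks whether ptr and end fall in the same page frame by comparing their PAGE_MASK-masked addresses. A sketch of that test, assuming 4 KiB pages:

/* Sketch of the check_page_span() test: with end = ptr + n - 1 as the
 * object's last byte, the object stays on a single page exactly when ptr
 * and end share a page frame.  A 4 KiB page size is assumed. */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int spans_pages(unsigned long ptr, unsigned long n)
{
        unsigned long end = ptr + n - 1;        /* inclusive last byte */

        return (ptr & PAGE_MASK) != (end & PAGE_MASK);
}

int main(void)
{
        printf("%d\n", spans_pages(0x1ff0, 0x10));   /* 0: ends at 0x1fff */
        printf("%d\n", spans_pages(0x1ff0, 0x11));   /* 1: crosses 0x2000 */
        return 0;
}
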
mmap.c
78 unsigned long start, unsigned long end);
527 unsigned long end, struct vm_area_struct **pprev, in find_vma_links() argument
543 if (vma_tmp->vm_start < end) in find_vma_links()
561 unsigned long addr, unsigned long end) in count_vma_pages_range() argument
567 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
571 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
578 if (vma->vm_start > end) in count_vma_pages_range()
581 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
718 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, in __vma_adjust() argument
734 if (end >= next->vm_end) { in __vma_adjust()
[all …]
page_ext.c
290 unsigned long start, end, pfn; in online_page_ext() local
294 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in online_page_ext()
306 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { in online_page_ext()
315 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
324 unsigned long start, end, pfn; in offline_page_ext() local
327 end = SECTION_ALIGN_UP(start_pfn + nr_pages); in offline_page_ext()
329 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
percpu-stats.c
56 int i, last_alloc, as_len, start, end; in chunk_map_stats() local
89 end = find_next_bit(chunk->bound_map, last_alloc, in chunk_map_stats()
93 end = find_next_bit(chunk->alloc_map, last_alloc, in chunk_map_stats()
98 alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE; in chunk_map_stats()
100 start = end; in chunk_map_stats()
mmu_notifier.c
97 unsigned long end) in __mmu_notifier_clear_flush_young() argument
105 young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
114 unsigned long end) in __mmu_notifier_clear_young() argument
122 young |= mn->ops->clear_young(mn, mm, start, end); in __mmu_notifier_clear_young()
218 range->end); in __mmu_notifier_invalidate_range_end()
232 unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range() argument
240 mn->ops->invalidate_range(mn, mm, start, end); in __mmu_notifier_invalidate_range()
rodata_test.c
17 unsigned long start, end; in rodata_test() local
42 end = (unsigned long)__end_rodata; in rodata_test()
47 if (end & (PAGE_SIZE - 1)) { in rodata_test()
/mm/kasan/
init.c
97 unsigned long end) in zero_pte_populate() argument
106 while (addr + PAGE_SIZE <= end) { in zero_pte_populate()
114 unsigned long end) in zero_pmd_populate() argument
120 next = pmd_addr_end(addr, end); in zero_pmd_populate()
122 if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { in zero_pmd_populate()
141 } while (pmd++, addr = next, addr != end); in zero_pmd_populate()
147 unsigned long end) in zero_pud_populate() argument
153 next = pud_addr_end(addr, end); in zero_pud_populate()
154 if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { in zero_pud_populate()
178 } while (pud++, addr = next, addr != end); in zero_pud_populate()
[all …]
generic.c
101 const void *end) in memory_is_nonzero() argument
107 if (end - start <= 16) in memory_is_nonzero()
108 return bytes_is_nonzero(start, end - start); in memory_is_nonzero()
118 words = (end - start) / 8; in memory_is_nonzero()
126 return bytes_is_nonzero(start, (end - start) % 8); in memory_is_nonzero()
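Finally, the kasan/generic.c hits scan a start/end byte range for any nonzero shadow byte: ranges of at most 16 bytes go byte by byte, longer ones mostly in 8-byte words with byte-wise prefix and suffix. A simplified userspace analogue (the kernel version returns the address of the first nonzero byte rather than a flag):

/* Simplified userspace analogue of memory_is_nonzero(): short [start, end)
 * ranges are checked byte by byte, longer ones mostly in 8-byte words. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int bytes_nonzero(const uint8_t *p, size_t n)
{
        while (n--)
                if (*p++)
                        return 1;
        return 0;
}

static int memory_is_nonzero_sketch(const uint8_t *start, const uint8_t *end)
{
        size_t prefix, words, i;

        if (end - start <= 16)
                return bytes_nonzero(start, end - start);

        prefix = (uintptr_t)start % 8 ? 8 - (uintptr_t)start % 8 : 0;
        if (bytes_nonzero(start, prefix))       /* unaligned head */
                return 1;
        start += prefix;

        words = (size_t)(end - start) / 8;
        for (i = 0; i < words; i++) {
                uint64_t w;

                memcpy(&w, start + i * 8, sizeof(w));
                if (w)                          /* whole word at a time */
                        return 1;
        }
        return bytes_nonzero(start + words * 8, (size_t)(end - start) % 8);
}

int main(void)
{
        uint8_t shadow[64] = { 0 };

        shadow[40] = 0xfa;                      /* one poisoned shadow byte */
        printf("%d\n", memory_is_nonzero_sketch(shadow, shadow + 64));
        return 0;
}
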
