Searched refs:start (Results 1–25 of 59), sorted by relevance

/mm/
mlock.c
376 unsigned long start, unsigned long end) in __munlock_pagevec_fill() argument
386 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
388 end = pgd_addr_end(start, end); in __munlock_pagevec_fill()
389 end = p4d_addr_end(start, end); in __munlock_pagevec_fill()
390 end = pud_addr_end(start, end); in __munlock_pagevec_fill()
391 end = pmd_addr_end(start, end); in __munlock_pagevec_fill()
394 start += PAGE_SIZE; in __munlock_pagevec_fill()
395 while (start < end) { in __munlock_pagevec_fill()
399 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
419 start += PAGE_SIZE; in __munlock_pagevec_fill()
[all …]
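
The mlock.c hits above show the *_addr_end() idiom: before stepping through [start, end) a page at a time, the walk clamps end to the next page-table boundary so it never crosses a pgd/p4d/pud/pmd unit. A minimal userspace sketch of that clamp, with BOUNDARY_SIZE standing in for PMD_SIZE (names here are illustrative, not the kernel's):

    /*
     * Next boundary above addr, unless the caller's end comes first.
     * The "- 1" comparison keeps the math right when end sits at the
     * very top of the address space and the addition wraps.
     */
    #define BOUNDARY_SIZE (1UL << 21)            /* stand-in for PMD_SIZE */
    #define BOUNDARY_MASK (~(BOUNDARY_SIZE - 1))

    static unsigned long addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long next = (addr + BOUNDARY_SIZE) & BOUNDARY_MASK;

        return (next - 1 < end - 1) ? next : end;
    }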
madvise.c
67 unsigned long start, unsigned long end, int behavior) in madvise_behavior() argument
117 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
134 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
135 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
145 if (start != vma->vm_start) { in madvise_behavior()
150 error = __split_vma(mm, vma, start, 1); in madvise_behavior()
183 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, in swapin_walk_pmd_entry() argument
193 for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
199 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
200 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
[all …]
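
madvise_behavior() splits the VMA when the requested range does not begin at vma->vm_start, so the new flags apply only to the affected part. A generic sketch of splitting a half-open interval at a point (split_interval() is a hypothetical helper, not the kernel's __split_vma()):

    struct interval { unsigned long start, end; };  /* [start, end) */

    /* Split *iv at `at`; *iv keeps [start, at), *right gets [at, end). */
    static int split_interval(struct interval *iv, unsigned long at,
                              struct interval *right)
    {
        if (at <= iv->start || at >= iv->end)
            return -1;                  /* point is not strictly inside */
        right->start = at;
        right->end = iv->end;
        iv->end = at;
        return 0;
    }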
msync.c
32 SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) in SYSCALL_DEFINE3() argument
40 start = untagged_addr(start); in SYSCALL_DEFINE3()
44 if (offset_in_page(start)) in SYSCALL_DEFINE3()
50 end = start + len; in SYSCALL_DEFINE3()
51 if (end < start) in SYSCALL_DEFINE3()
54 if (end == start) in SYSCALL_DEFINE3()
61 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
71 if (start < vma->vm_start) { in SYSCALL_DEFINE3()
72 start = vma->vm_start; in SYSCALL_DEFINE3()
73 if (start >= end) in SYSCALL_DEFINE3()
[all …]
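
The msync() prologue above is the standard user-range validation sequence: reject a misaligned start, compute the end, detect address-space wraparound, and treat an empty range as a no-op. A minimal sketch assuming 4 KiB pages (the constants and helper name are illustrative):

    #include <errno.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Returns <0 on error, 0 for an empty no-op range, 1 to proceed. */
    static int validate_range(unsigned long start, size_t len)
    {
        unsigned long end;

        if (start & ~PAGE_MASK)         /* offset_in_page(): misaligned */
            return -EINVAL;
        len = (len + ~PAGE_MASK) & PAGE_MASK;   /* round up to a page */
        end = start + len;
        if (end < start)                /* wrapped around the address space */
            return -ENOMEM;
        return end != start;
    }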
mprotect.c
174 range.start = 0; in change_pmd_range()
186 if (!range.start) { in change_pmd_range()
219 if (range.start) in change_pmd_range()
274 unsigned long start = addr; in change_protection_range() local
291 flush_tlb_range(vma, start, end); in change_protection_range()
297 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
304 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
306 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); in change_protection()
340 unsigned long start, unsigned long end, unsigned long newflags) in mprotect_fixup() argument
344 long nrpages = (end - start) >> PAGE_SHIFT; in mprotect_fixup()
[all …]
memory_hotplug.c
104 static struct resource *register_memory_resource(u64 start, u64 size) in register_memory_resource() argument
110 if (start + size > max_mem_size) in register_memory_resource()
118 res = __request_region(&iomem_resource, start, size, in register_memory_resource()
123 start, start + size); in register_memory_resource()
886 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) in hotadd_new_pgdat() argument
889 unsigned long start_pfn = PFN_DOWN(start); in hotadd_new_pgdat()
965 static int __try_online_node(int nid, u64 start, bool set_node_online) in __try_online_node() argument
973 pgdat = hotadd_new_pgdat(nid, start); in __try_online_node()
1002 static int check_hotplug_memory_range(u64 start, u64 size) in check_hotplug_memory_range() argument
1005 if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) || in check_hotplug_memory_range()
[all …]
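
check_hotplug_memory_range() only accepts ranges whose start and size are both multiples of the memory block size. A sketch of that check (the block size is a parameter here rather than memory_block_size_bytes()):

    #include <errno.h>
    #include <stdint.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Both the base and the size must be whole memory blocks. */
    static int check_range(uint64_t start, uint64_t size, uint64_t block_size)
    {
        if (!size || !IS_ALIGNED(start, block_size) ||
            !IS_ALIGNED(size, block_size))
            return -EINVAL;
        return 0;
    }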
gup.c
780 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
791 start = untagged_addr(start); in __get_user_pages()
809 if (!vma || start >= vma->vm_end) { in __get_user_pages()
810 vma = find_extend_vma(mm, start); in __get_user_pages()
811 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
812 ret = get_gate_page(mm, start & PAGE_MASK, in __get_user_pages()
827 &start, &nr_pages, i, in __get_user_pages()
843 page = follow_page_mask(vma, start, foll_flags, &ctx); in __get_user_pages()
845 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
873 flush_anon_page(vma, page, start); in __get_user_pages()
[all …]
hmm.c
109 if (nrange->end < range->start || nrange->start >= range->end) in hmm_invalidate_range_start()
264 i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_bad()
293 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole_()
387 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
553 unsigned long start, in hmm_vma_walk_pmd() argument
560 unsigned long addr = start, i; in hmm_vma_walk_pmd()
567 return hmm_vma_walk_hole(start, end, walk); in hmm_vma_walk_pmd()
574 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pmd()
587 return hmm_pfns_bad(start, end, walk); in hmm_vma_walk_pmd()
604 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_pmd()
[all …]
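
The test at hmm.c:109 is an interval-overlap check: the invalidation is ignored when the notifier range and the HMM range are disjoint. A standalone sketch for half-open [start, end) intervals (the snippet's exact < versus <= comparisons depend on whether its end is inclusive):

    #include <stdbool.h>

    struct range { unsigned long start, end; };  /* [start, end) */

    /* Disjoint iff one range ends at or before the other begins. */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
        return !(a->end <= b->start || b->end <= a->start);
    }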
memremap.c
51 xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end), in pgmap_array_delete()
58 return PHYS_PFN(pgmap->res.start) + in pfn_first()
66 return (res->start + resource_size(res)) >> PAGE_SHIFT; in pfn_end()
123 __remove_pages(PHYS_PFN(res->start), in memunmap_pages()
126 arch_remove_memory(nid, res->start, resource_size(res), in memunmap_pages()
128 kasan_remove_zero_shadow(__va(res->start), resource_size(res)); in memunmap_pages()
132 untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res)); in memunmap_pages()
221 conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL); in memremap_pages()
237 is_ram = region_intersects(res->start, resource_size(res), in memremap_pages()
247 error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start), in memremap_pages()
[all …]
frame_vector.c
34 int get_vaddr_frames(unsigned long start, unsigned int nr_frames, in get_vaddr_frames() argument
49 start = untagged_addr(start); in get_vaddr_frames()
53 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
75 ret = get_user_pages_locked(start, nr_frames, in get_vaddr_frames()
85 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { in get_vaddr_frames()
86 err = follow_pfn(vma, start, &nums[ret]); in get_vaddr_frames()
92 start += PAGE_SIZE; in get_vaddr_frames()
99 if (ret >= nr_frames || start < vma->vm_end) in get_vaddr_frames()
101 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
memtest.c
37 u64 *p, *start, *end; in memtest() local
43 start = __va(start_phys_aligned); in memtest()
44 end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
48 for (p = start; p < end; p++) in memtest()
51 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
66 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
73 this_start = clamp(this_start, start, end); in do_one_pass()
74 this_end = clamp(this_end, start, end); in do_one_pass()
100 void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
111 do_one_pass(patterns[idx], start, end); in early_memtest()
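
memtest() writes a pattern across the region, then re-reads it and reports any word that no longer matches. A userspace sketch of that two-pass loop over a local buffer (the size and pattern are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static void memtest_pass(uint64_t *start, uint64_t *end, uint64_t pattern)
    {
        uint64_t *p;

        for (p = start; p < end; p++)           /* write pass */
            *p = pattern;
        for (p = start; p < end; p++)           /* verify pass */
            if (*p != pattern)
                printf("bad word at %p: %#llx\n", (void *)p,
                       (unsigned long long)*p);
    }

    int main(void)
    {
        static uint64_t buf[1024];

        memtest_pass(buf, buf + 1024, 0xaaaaaaaaaaaaaaaaULL);
        return 0;
    }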
memblock.c
190 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
198 this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
199 this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
225 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
234 this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
235 this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
272 phys_addr_t align, phys_addr_t start, in memblock_find_in_range_node() argument
284 start = max_t(phys_addr_t, start, PAGE_SIZE); in memblock_find_in_range_node()
285 end = max(start, end); in memblock_find_in_range_node()
296 bottom_up_start = max(start, kernel_end); in memblock_find_in_range_node()
[all …]
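
Both memblock finders clip each candidate free region to the caller's [start, end) window with clamp(), then skip regions that become empty. A sketch of that clipping step (clamp() here is a local helper, not the kernel macro):

    typedef unsigned long long phys_addr_t;

    static phys_addr_t clamp(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    /* Clip a free region to [start, end); returns 0 if nothing is left. */
    static int clip_region(phys_addr_t *this_start, phys_addr_t *this_end,
                           phys_addr_t start, phys_addr_t end)
    {
        *this_start = clamp(*this_start, start, end);
        *this_end = clamp(*this_end, start, end);
        return *this_start < *this_end;
    }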
pagewalk.c
225 static int walk_page_test(unsigned long start, unsigned long end, in walk_page_test() argument
232 return ops->test_walk(start, end, walk); in walk_page_test()
245 err = ops->pte_hole(start, end, walk); in walk_page_test()
251 static int __walk_page_range(unsigned long start, unsigned long end, in __walk_page_range() argument
259 err = walk_hugetlb_range(start, end, walk); in __walk_page_range()
261 err = walk_pgd_range(start, end, walk); in __walk_page_range()
301 int walk_page_range(struct mm_struct *mm, unsigned long start, in walk_page_range() argument
314 if (start >= end) in walk_page_range()
322 vma = find_vma(walk.mm, start); in walk_page_range()
327 } else if (start < vma->vm_start) { /* outside vma */ in walk_page_range()
[all …]
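
walk_page_range() validates the range, finds the covering VMA, and dispatches through a table of callbacks: ops->test_walk() can skip a range, ops->pte_hole() handles unmapped gaps. A stripped-down sketch of that dispatch shape (the struct and return conventions mirror, but are not, the kernel's mm_walk_ops):

    struct walk_ops {
        /* <0 aborts the walk, 0 walks the range, >0 skips it. */
        int (*test_walk)(unsigned long start, unsigned long end, void *priv);
        /* Called for unmapped holes inside [start, end). */
        int (*pte_hole)(unsigned long start, unsigned long end, void *priv);
    };

    static int walk_range(unsigned long start, unsigned long end,
                          const struct walk_ops *ops, void *priv)
    {
        int err;

        if (start >= end)
            return -1;                  /* the kernel returns -EINVAL */
        if (ops->test_walk) {
            err = ops->test_walk(start, end, priv);
            if (err < 0)
                return err;             /* abort */
            if (err > 0)
                return 0;               /* skip: not an error */
        }
        /* ...descend pgd -> p4d -> pud -> pmd -> pte from here... */
        return 0;
    }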
page_poison.c
69 unsigned char *start; in check_poison_mem() local
75 start = memchr_inv(mem, PAGE_POISON, bytes); in check_poison_mem()
76 if (!start) in check_poison_mem()
79 for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
86 else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
91 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, in check_poison_mem()
92 end - start + 1, 1); in check_poison_mem()
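
check_poison_mem() narrows the report to the corrupt span: memchr_inv() finds the first byte that is not PAGE_POISON, a backwards scan finds the last, and only that window is dumped. A userspace sketch with a manual forward scan in place of memchr_inv():

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_POISON 0xaa

    static void check_poison(const unsigned char *mem, size_t bytes)
    {
        const unsigned char *start = NULL, *end;
        size_t i;

        for (i = 0; i < bytes; i++)             /* first unexpected byte */
            if (mem[i] != PAGE_POISON) {
                start = mem + i;
                break;
            }
        if (!start)
            return;                             /* fully poisoned: OK */

        for (end = mem + bytes - 1; end > start; end--)
            if (*end != PAGE_POISON)            /* last unexpected byte */
                break;

        fprintf(stderr, "pagealloc: corruption in %zu byte(s)\n",
                (size_t)(end - start + 1));
    }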
percpu.c
290 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ argument
291 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
295 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ argument
296 for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \
448 int start = pcpu_next_hint(block, alloc_bits); in pcpu_next_fit_region() local
451 start; in pcpu_next_fit_region()
452 *bit_off = pcpu_block_off_to_off(i, start); in pcpu_next_fit_region()
603 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) in pcpu_block_update() argument
605 int contig = end - start; in pcpu_block_update()
607 block->first_free = min(block->first_free, start); in pcpu_block_update()
[all …]
mempolicy.c
613 static int queue_pages_test_walk(unsigned long start, unsigned long end, in queue_pages_test_walk() argument
631 if (vma->vm_start > start) in queue_pages_test_walk()
632 start = vma->vm_start; in queue_pages_test_walk()
648 change_prot_numa(vma, start, endvma); in queue_pages_test_walk()
680 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, in queue_pages_range() argument
691 return walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); in queue_pages_range()
731 static int mbind_range(struct mm_struct *mm, unsigned long start, in mbind_range() argument
742 vma = find_vma(mm, start); in mbind_range()
743 if (!vma || vma->vm_start > start) in mbind_range()
747 if (start > vma->vm_start) in mbind_range()
[all …]
migrate.c
1490 static int store_status(int __user *status, int start, int value, int nr) in store_status() argument
1493 if (put_user(value, status + start)) in store_status()
1495 start++; in store_status()
1602 int start, i; in do_pages_move() local
1607 for (i = start = 0; i < nr_pages; i++) { in do_pages_move()
1631 start = i; in do_pages_move()
1636 err = store_status(status, start, current_node, i - start); in do_pages_move()
1639 start = i; in do_pages_move()
1668 if (i > start) { in do_pages_move()
1669 err = store_status(status, start, current_node, i - start); in do_pages_move()
[all …]
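
do_pages_move() batches consecutive pages destined for the same node and emits one status value per run, instead of one put_user() per page. A sketch of that run-length pattern over plain arrays (no __user copies here):

    static void store_status(int *status, int start, int value, int nr)
    {
        while (nr-- > 0)
            status[start++] = value;
    }

    static void flush_runs(const int *node, int *status, int nr_pages)
    {
        int i, start = 0, current_node;

        if (nr_pages <= 0)
            return;
        current_node = node[0];
        for (i = 1; i < nr_pages; i++) {
            if (node[i] == current_node)
                continue;                       /* extend the current run */
            store_status(status, start, current_node, i - start);
            start = i;                          /* a new run begins here */
            current_node = node[i];
        }
        store_status(status, start, current_node, i - start);
    }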
truncate.c
294 pgoff_t start; /* inclusive */ in truncate_inode_pages_range() local
316 start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; in truncate_inode_pages_range()
328 index = start; in truncate_inode_pages_range()
375 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range()
378 if (start > end) { in truncate_inode_pages_range()
410 if (start >= end) in truncate_inode_pages_range()
413 index = start; in truncate_inode_pages_range()
419 if (index == start) in truncate_inode_pages_range()
422 index = start; in truncate_inode_pages_range()
425 if (index == start && indices[0] >= end) { in truncate_inode_pages_range()
[all …]
readahead.c
371 ra->start = offset; in try_context_readahead()
409 if ((offset == (ra->start + ra->size - ra->async_size) || in ondemand_readahead()
410 offset == (ra->start + ra->size))) { in ondemand_readahead()
411 ra->start += ra->size; in ondemand_readahead()
424 pgoff_t start; in ondemand_readahead() local
427 start = page_cache_next_miss(mapping, offset + 1, max_pages); in ondemand_readahead()
430 if (!start || start - offset > max_pages) in ondemand_readahead()
433 ra->start = start; in ondemand_readahead()
434 ra->size = start - offset; /* old async_size */ in ondemand_readahead()
470 ra->start = offset; in ondemand_readahead()
[all …]
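
ondemand_readahead() treats a read that lands on the window's async marker (or right at its end) as sequential and slides the window forward; anything else restarts it. A sketch of that decision with a deliberately simplified sizing policy (the kernel uses get_next_ra_size()):

    struct ra_state {
        unsigned long start;        /* first page of the current window */
        unsigned long size;         /* window size, in pages */
        unsigned long async_size;   /* refill when this many pages remain */
    };

    static void on_demand(struct ra_state *ra, unsigned long offset,
                          unsigned long max_pages)
    {
        if (offset == ra->start + ra->size - ra->async_size ||
            offset == ra->start + ra->size) {
            ra->start += ra->size;          /* sequential: slide forward */
            if (ra->size * 2 <= max_pages)  /* grow, capped at max_pages */
                ra->size *= 2;
            ra->async_size = ra->size;
        } else {
            ra->start = offset;             /* random read: restart */
            ra->size = 1;
            ra->async_size = 0;
        }
    }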
percpu-stats.c
56 int i, last_alloc, as_len, start, end; in chunk_map_stats() local
76 start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; in chunk_map_stats()
87 while (start < last_alloc) { in chunk_map_stats()
88 if (test_bit(start, chunk->alloc_map)) { in chunk_map_stats()
90 start + 1); in chunk_map_stats()
94 start + 1); in chunk_map_stats()
98 alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE; in chunk_map_stats()
100 start = end; in chunk_map_stats()
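
chunk_map_stats() walks chunk->alloc_map as alternating runs of set and clear bits and converts each run length into bytes. A sketch over a plain bool array instead of the kernel bitmap helpers:

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_ALLOC_SIZE 4    /* stand-in for PCPU_MIN_ALLOC_SIZE */

    static void map_stats(const bool *alloc_map, int start, int last_alloc)
    {
        while (start < last_alloc) {
            bool in_use = alloc_map[start];
            int end = start;

            while (end < last_alloc && alloc_map[end] == in_use)
                end++;                      /* extend the current run */
            printf("%s: %d bytes\n", in_use ? "allocated" : "free",
                   (end - start) * MIN_ALLOC_SIZE);
            start = end;                    /* the next run starts here */
        }
    }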
vmalloc.c
220 static int vmap_page_range_noflush(unsigned long start, unsigned long end, in vmap_page_range_noflush() argument
225 unsigned long addr = start; in vmap_page_range_noflush()
241 static int vmap_page_range(unsigned long start, unsigned long end, in vmap_page_range() argument
246 ret = vmap_page_range_noflush(start, end, prot, pages); in vmap_page_range()
247 flush_cache_vmap(start, end); in vmap_page_range()
1245 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) in __purge_vmap_area_lazy() argument
1269 if (va->va_start < start) in __purge_vmap_area_lazy()
1270 start = va->va_start; in __purge_vmap_area_lazy()
1275 flush_tlb_kernel_range(start, end); in __purge_vmap_area_lazy()
1669 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) in _vm_unmap_aliases() argument
[all …]
sparse-vmemmap.c
133 unsigned long start, unsigned long end) in vmemmap_verify() argument
140 start, end - 1); in vmemmap_verify()
216 int __meminit vmemmap_populate_basepages(unsigned long start, in vmemmap_populate_basepages() argument
219 unsigned long addr = start; in vmemmap_populate_basepages()
251 unsigned long start; in __populate_section_memmap() local
263 start = (unsigned long) pfn_to_page(pfn); in __populate_section_memmap()
264 end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()
266 if (vmemmap_populate(start, end, nid, altmap)) in __populate_section_memmap()
kmemleak.c
122 unsigned long start; member
703 unsigned long start, end; in delete_object_part() local
719 start = object->pointer; in delete_object_part()
721 if (ptr > start) in delete_object_part()
722 create_object(start, ptr - start, object->min_count, in delete_object_part()
818 area->start = ptr; in add_scan_area()
1231 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); in scan_block() local
1237 for (ptr = start; ptr < end; ptr++) { in scan_block()
1301 static void scan_large_block(void *start, void *end) in scan_large_block() argument
1305 while (start < end) { in scan_large_block()
[all …]
mincore.c
252 SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, in SYSCALL_DEFINE3() argument
259 start = untagged_addr(start); in SYSCALL_DEFINE3()
262 if (start & ~PAGE_MASK) in SYSCALL_DEFINE3()
266 if (!access_ok((void __user *) start, len)) in SYSCALL_DEFINE3()
287 retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); in SYSCALL_DEFINE3()
298 start += retval << PAGE_SHIFT; in SYSCALL_DEFINE3()
mmap.c
78 unsigned long start, unsigned long end);
719 int __vma_adjust(struct vm_area_struct *vma, unsigned long start, in __vma_adjust() argument
818 vma_adjust_trans_huge(orig_vma, start, end, adjust_next); in __vma_adjust()
859 if (start != vma->vm_start) { in __vma_adjust()
860 vma->vm_start = start; in __vma_adjust()
2565 unsigned long start; in find_extend_vma() local
2578 start = vma->vm_start; in find_extend_vma()
2582 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2620 unsigned long start, unsigned long end) in unmap_region() argument
2626 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
[all …]
/mm/kasan/
generic.c
88 static __always_inline unsigned long bytes_is_nonzero(const u8 *start, in bytes_is_nonzero() argument
92 if (unlikely(*start)) in bytes_is_nonzero()
93 return (unsigned long)start; in bytes_is_nonzero()
94 start++; in bytes_is_nonzero()
101 static __always_inline unsigned long memory_is_nonzero(const void *start, in memory_is_nonzero() argument
106 unsigned int prefix = (unsigned long)start % 8; in memory_is_nonzero()
108 if (end - start <= 16) in memory_is_nonzero()
109 return bytes_is_nonzero(start, end - start); in memory_is_nonzero()
113 ret = bytes_is_nonzero(start, prefix); in memory_is_nonzero()
116 start += prefix; in memory_is_nonzero()
[all …]
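
memory_is_nonzero() checks the unaligned prefix byte-by-byte, then scans aligned 8-byte words, falling back to a bytewise scan for ranges of 16 bytes or less. A userspace sketch of the same alignment split (the return value is the first nonzero byte, or NULL):

    #include <stddef.h>
    #include <stdint.h>

    static const uint8_t *bytes_nonzero(const uint8_t *start, size_t len)
    {
        while (len--) {
            if (*start)
                return start;               /* first nonzero byte */
            start++;
        }
        return NULL;
    }

    static const uint8_t *mem_nonzero(const uint8_t *start, const uint8_t *end)
    {
        size_t prefix = (uintptr_t)start % 8;
        const uint8_t *ret;

        if (end - start <= 16)              /* short range: bytewise only */
            return bytes_nonzero(start, end - start);

        if (prefix) {                       /* bytes up to 8-byte alignment */
            prefix = 8 - prefix;
            ret = bytes_nonzero(start, prefix);
            if (ret)
                return ret;
            start += prefix;
        }
        while (end - start >= 8) {          /* aligned 64-bit words */
            if (*(const uint64_t *)start)
                return bytes_nonzero(start, 8);
            start += 8;
        }
        return bytes_nonzero(start, end - start);   /* trailing bytes */
    }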
