Searched refs:start (Results 1 – 25 of 51), sorted by relevance


/mm/
madvise.c:48 unsigned long start, unsigned long end, int behavior) in madvise_behavior() argument
87 error = ksm_madvise(vma, start, end, behavior, &new_flags); in madvise_behavior()
104 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in madvise_behavior()
105 *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma, in madvise_behavior()
115 if (start != vma->vm_start) { in madvise_behavior()
116 error = split_vma(mm, vma, start, 1); in madvise_behavior()
140 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, in swapin_walk_pmd_entry() argument
150 for (index = start; index != end; index += PAGE_SIZE) { in swapin_walk_pmd_entry()
156 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); in swapin_walk_pmd_entry()
157 pte = *(orig_pte + ((index - start) / PAGE_SIZE)); in swapin_walk_pmd_entry()
[all …]
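
The madvise_behavior() hits above are the flag-changing path: the kernel merges or splits VMAs so the new behavior covers exactly [start, end), while MADV_WILLNEED on swapped-out anonymous memory is what reaches swapin_walk_pmd_entry(). A minimal userspace sketch of the split path (sizes and the advice value are arbitrary choices for illustration):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 16 * 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* MADV_RANDOM changes vma flags, so the kernel goes through
     * madvise_behavior(); advising only interior pages means
     * start != vma->vm_start, forcing the split_vma() branch. */
    if (madvise(p + 4096, 8 * 4096, MADV_RANDOM))
        perror("madvise");

    munmap(p, len);
    return 0;
}
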
mlock.c:361 struct vm_area_struct *vma, int zoneid, unsigned long start, in __munlock_pagevec_fill() argument
372 pte = get_locked_pte(vma->vm_mm, start, &ptl); in __munlock_pagevec_fill()
374 end = pgd_addr_end(start, end); in __munlock_pagevec_fill()
375 end = pud_addr_end(start, end); in __munlock_pagevec_fill()
376 end = pmd_addr_end(start, end); in __munlock_pagevec_fill()
379 start += PAGE_SIZE; in __munlock_pagevec_fill()
380 while (start < end) { in __munlock_pagevec_fill()
384 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
397 start += PAGE_SIZE; in __munlock_pagevec_fill()
402 return start; in __munlock_pagevec_fill()
[all …]
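
__munlock_pagevec_fill() batches the pages of a just-unlocked range into a per-zone pagevec instead of touching them one at a time. The userspace trigger is simply munlock(2); a small sketch (the mapping size is arbitrary, and mlock() may need a sufficient RLIMIT_MEMLOCK):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4 * 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    if (mlock(p, len)) { perror("mlock"); return 1; }

    /* munlock() is what ultimately reaches __munlock_pagevec_fill(),
     * walking the PTEs of the range a zone at a time. */
    if (munlock(p, len))
        perror("munlock");

    munmap(p, len);
    return 0;
}
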
bootmem.c:96 unsigned long mapstart, unsigned long start, unsigned long end) in init_bootmem_core() argument
100 mminit_validate_memmodel_limits(&start, &end); in init_bootmem_core()
102 bdata->node_min_pfn = start; in init_bootmem_core()
110 mapsize = bootmap_bytes(end - start); in init_bootmem_core()
114 bdata - bootmem_node_data, start, mapstart, end, mapsize); in init_bootmem_core()
141 unsigned long __init init_bootmem(unsigned long start, unsigned long pages) in init_bootmem() argument
144 min_low_pfn = start; in init_bootmem()
145 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); in init_bootmem()
175 unsigned long *map, start, end, pages, cur, count = 0; in free_all_bootmem_core() local
181 start = bdata->node_min_pfn; in free_all_bootmem_core()
[all …]
msync.c:31 SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) in SYSCALL_DEFINE3() argument
41 if (offset_in_page(start)) in SYSCALL_DEFINE3()
47 end = start + len; in SYSCALL_DEFINE3()
48 if (end < start) in SYSCALL_DEFINE3()
51 if (end == start) in SYSCALL_DEFINE3()
58 vma = find_vma(mm, start); in SYSCALL_DEFINE3()
68 if (start < vma->vm_start) { in SYSCALL_DEFINE3()
69 start = vma->vm_start; in SYSCALL_DEFINE3()
70 if (start >= end) in SYSCALL_DEFINE3()
81 fstart = (start - vma->vm_start) + in SYSCALL_DEFINE3()
[all …]
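
The msync() definition above shows the argument contract: start must be page aligned (the offset_in_page() check), end = start + len must not wrap, and an empty range succeeds trivially. A userspace sketch of both the valid and the invalid case; the temp-file path is made up for the demo:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/tmp/msync-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0 || ftruncate(fd, 4096)) { perror("setup"); return 1; }

    char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    memcpy(p, "dirty", 5);

    /* p is page aligned, so the offset_in_page(start) check passes;
     * MS_SYNC waits for writeback of the dirtied pages. */
    if (msync(p, 4096, MS_SYNC))
        perror("msync");

    /* An unaligned start fails that first check with EINVAL. */
    if (msync(p + 1, 100, MS_SYNC))
        perror("msync (unaligned, expected to fail)");

    munmap(p, 4096);
    close(fd);
    return 0;
}
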
nobootmem.c:94 static void __init __free_pages_memory(unsigned long start, unsigned long end) in __free_pages_memory() argument
98 while (start < end) { in __free_pages_memory()
99 order = min(MAX_ORDER - 1UL, __ffs(start)); in __free_pages_memory()
101 while (start + (1UL << order) > end) in __free_pages_memory()
104 __free_pages_bootmem(pfn_to_page(start), start, order); in __free_pages_memory()
106 start += (1UL << order); in __free_pages_memory()
110 static unsigned long __init __free_memory_core(phys_addr_t start, in __free_memory_core() argument
113 unsigned long start_pfn = PFN_UP(start); in __free_memory_core()
128 phys_addr_t start, end; in free_low_memory_core_early() local
133 for_each_reserved_mem_region(i, &start, &end) in free_low_memory_core_early()
[all …]
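
The interesting line in __free_pages_memory() is the order computation: __ffs(start) gives the alignment of the current PFN, so the loop always hands the buddy allocator the largest naturally aligned block that still fits below end. A standalone sketch of just that decomposition (MAX_ORDER and the PFN range are illustrative; __builtin_ctzl stands in for __ffs):

#include <stdio.h>

#define MAX_ORDER 11UL

int main(void)
{
    unsigned long start = 5, end = 100;

    while (start < end) {
        /* __ffs(start): index of the lowest set bit; pfn 0 is
         * aligned to any order. */
        unsigned long order = start ? (unsigned long)__builtin_ctzl(start)
                                    : MAX_ORDER - 1;
        if (order > MAX_ORDER - 1)
            order = MAX_ORDER - 1;

        /* Shrink until the block fits in the range, exactly as the
         * kernel loop does before __free_pages_bootmem(). */
        while (start + (1UL << order) > end)
            order--;

        printf("free pfn %3lu, order %lu (%lu pages)\n",
               start, order, 1UL << order);
        start += 1UL << order;
    }
    return 0;
}
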
mprotect.c:221 unsigned long start = addr; in change_protection_range() local
238 flush_tlb_range(vma, start, end); in change_protection_range()
244 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
251 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
253 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); in change_protection()
279 static int prot_none_walk(struct vm_area_struct *vma, unsigned long start, in prot_none_walk() argument
291 return walk_page_range(start, end, &prot_none_walk); in prot_none_walk()
296 unsigned long start, unsigned long end, unsigned long newflags) in mprotect_fixup() argument
300 long nrpages = (end - start) >> PAGE_SHIFT; in mprotect_fixup()
319 error = prot_none_walk(vma, start, end, newflags); in mprotect_fixup()
[all …]
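
mprotect_fixup() mirrors the madvise path: split or merge VMAs so newflags covers exactly [start, end), after which change_protection() rewrites the page tables and flushes the TLB for the range. From userspace (sizes arbitrary):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4 * 4096;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    p[0] = 1;

    /* Protecting a single interior page makes mprotect_fixup() split
     * the VMA; change_protection_range() then updates the PTEs and
     * flush_tlb_range() drops the stale translations. */
    if (mprotect(p + 4096, 4096, PROT_READ))
        perror("mprotect");

    munmap(p, len);
    return 0;
}
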
gup.c:478 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
505 if (!vma || start >= vma->vm_end) { in __get_user_pages()
506 vma = find_extend_vma(mm, start); in __get_user_pages()
507 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
509 ret = get_gate_page(mm, start & PAGE_MASK, in __get_user_pages()
524 &start, &nr_pages, i, in __get_user_pages()
541 page = follow_page_mask(vma, start, foll_flags, &page_mask); in __get_user_pages()
544 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
570 flush_anon_page(vma, page, start); in __get_user_pages()
579 page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask); in __get_user_pages()
[all …]
pagewalk.c:173 static int walk_page_test(unsigned long start, unsigned long end, in walk_page_test() argument
179 return walk->test_walk(start, end, walk); in walk_page_test()
192 err = walk->pte_hole(start, end, walk); in walk_page_test()
198 static int __walk_page_range(unsigned long start, unsigned long end, in __walk_page_range() argument
206 err = walk_hugetlb_range(start, end, walk); in __walk_page_range()
208 err = walk_pgd_range(start, end, walk); in __walk_page_range()
243 int walk_page_range(unsigned long start, unsigned long end, in walk_page_range() argument
250 if (start >= end) in walk_page_range()
258 vma = find_vma(walk->mm, start); in walk_page_range()
263 } else if (start < vma->vm_start) { /* outside vma */ in walk_page_range()
[all …]
memtest.c:36 u64 *p, *start, *end; in memtest() local
42 start = __va(start_phys_aligned); in memtest()
43 end = start + (size - (start_phys_aligned - start_phys)) / incr; in memtest()
47 for (p = start; p < end; p++) in memtest()
50 for (p = start; p < end; p++, start_phys_aligned += incr) { in memtest()
65 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end) in do_one_pass() argument
72 this_start = clamp(this_start, start, end); in do_one_pass()
73 this_end = clamp(this_end, start, end); in do_one_pass()
99 void __init early_memtest(phys_addr_t start, phys_addr_t end) in early_memtest() argument
110 do_one_pass(patterns[idx], start, end); in early_memtest()
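
memtest() is two passes per pattern: fill every word in the range, then re-read and report words that changed. The same structure in a self-contained userspace form (buffer size and the two patterns are taken as illustrative values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static size_t pattern_pass(uint64_t *start, uint64_t *end, uint64_t pattern)
{
    uint64_t *p;
    size_t bad = 0;

    for (p = start; p < end; p++)          /* write pass */
        *p = pattern;
    for (p = start; p < end; p++)          /* verify pass */
        if (*p != pattern) {
            fprintf(stderr, "bad word at %p\n", (void *)p);
            bad++;
        }
    return bad;
}

int main(void)
{
    size_t words = 1 << 16;
    uint64_t patterns[] = { 0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL };
    uint64_t *buf = malloc(words * sizeof(*buf));

    if (!buf)
        return 1;
    for (size_t i = 0; i < 2; i++)
        if (pattern_pass(buf, buf + words, patterns[i]))
            return 1;
    puts("all passes clean");
    free(buf);
    return 0;
}
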
memory_hotplug.c:131 static struct resource *register_memory_resource(u64 start, u64 size) in register_memory_resource() argument
138 res->start = start; in register_memory_resource()
139 res->end = start + size - 1; in register_memory_resource()
771 resource_size_t start, size; in __remove_pages() local
780 start = phys_start_pfn << PAGE_SHIFT; in __remove_pages()
785 ret = release_mem_region_adjustable(&iomem_resource, start, size); in __remove_pages()
787 resource_size_t endres = start + size - 1; in __remove_pages()
790 &start, &endres, ret); in __remove_pages()
1091 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) in hotadd_new_pgdat() argument
1096 unsigned long start_pfn = PFN_DOWN(start); in hotadd_new_pgdat()
[all …]
readahead.c:357 ra->start = offset; in try_context_readahead()
386 if ((offset == (ra->start + ra->size - ra->async_size) || in ondemand_readahead()
387 offset == (ra->start + ra->size))) { in ondemand_readahead()
388 ra->start += ra->size; in ondemand_readahead()
401 pgoff_t start; in ondemand_readahead() local
404 start = page_cache_next_hole(mapping, offset + 1, max); in ondemand_readahead()
407 if (!start || start - offset > max) in ondemand_readahead()
410 ra->start = start; in ondemand_readahead()
411 ra->size = start - offset; /* old async_size */ in ondemand_readahead()
447 ra->start = offset; in ondemand_readahead()
[all …]
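
ondemand_readahead() grows the window when a read lands exactly where the previous window ended (a sequential hit) and rebuilds it from the next page-cache hole otherwise. Userspace can only nudge this logic; a sketch using posix_fadvise() and the Linux-specific readahead(2) (the 1 MiB count is arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc < 2) { fprintf(stderr, "usage: %s file\n", argv[0]); return 1; }

    int fd = open(argv[1], O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }

    /* Declare sequential access so the kernel may use a larger
     * readahead window for this file description. */
    posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);

    /* readahead(2) populates the page cache up front; later reads
     * then hit pages the ondemand logic would otherwise fetch. */
    if (readahead(fd, 0, 1 << 20) < 0)
        perror("readahead");

    close(fd);
    return 0;
}
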
frame_vector.c:33 int get_vaddr_frames(unsigned long start, unsigned int nr_frames, in get_vaddr_frames() argument
50 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
58 ret = get_user_pages_locked(current, mm, start, nr_frames, in get_vaddr_frames()
68 while (ret < nr_frames && start + PAGE_SIZE <= vma->vm_end) { in get_vaddr_frames()
69 err = follow_pfn(vma, start, &nums[ret]); in get_vaddr_frames()
75 start += PAGE_SIZE; in get_vaddr_frames()
82 if (ret >= nr_frames || start < vma->vm_end) in get_vaddr_frames()
84 vma = find_vma_intersection(mm, start, start + 1); in get_vaddr_frames()
debug-pagealloc.c:89 unsigned char *start; in check_poison_mem() local
92 start = memchr_inv(mem, PAGE_POISON, bytes); in check_poison_mem()
93 if (!start) in check_poison_mem()
96 for (end = mem + bytes - 1; end > start; end--) { in check_poison_mem()
103 else if (start == end && single_bit_flip(*start, PAGE_POISON)) in check_poison_mem()
108 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start, in check_poison_mem()
109 end - start + 1, 1); in check_poison_mem()
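
check_poison_mem() leans on memchr_inv() to find the first byte that differs from PAGE_POISON, scans backward for the last bad byte, and special-cases a single-bit flip. A userspace rendition with a naive memchr_inv() stand-in (PAGE_POISON is 0xaa in the kernel; the corrupted offset is invented):

#include <stdio.h>
#include <string.h>

#define PAGE_POISON 0xaa

/* Naive stand-in for the kernel's memchr_inv(): first byte in
 * [mem, mem + bytes) that is not c, or NULL if all match. */
static unsigned char *memchr_inv_demo(unsigned char *mem, int c, size_t bytes)
{
    for (size_t i = 0; i < bytes; i++)
        if (mem[i] != (unsigned char)c)
            return mem + i;
    return NULL;
}

int main(void)
{
    unsigned char page[4096];

    memset(page, PAGE_POISON, sizeof(page));
    page[100] ^= 0x01;  /* simulate a single-bit flip */

    unsigned char *start = memchr_inv_demo(page, PAGE_POISON, sizeof(page));
    if (!start)
        puts("poison intact");
    else
        printf("corruption at offset %td (0x%02x)\n", start - page, *start);
    return 0;
}
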
nommu.c:136 unsigned long start, unsigned long nr_pages, in __get_user_pages() argument
153 vma = find_vma(mm, start); in __get_user_pages()
163 pages[i] = virt_to_page(start); in __get_user_pages()
169 start = (start + PAGE_SIZE) & PAGE_MASK; in __get_user_pages()
186 unsigned long start, unsigned long nr_pages, in get_user_pages() argument
190 return __get_user_pages(tsk, mm, start, nr_pages, in get_user_pages()
196 unsigned long start, unsigned long nr_pages, in get_user_pages_locked() argument
200 return get_user_pages(tsk, mm, start, nr_pages, gup_flags, in get_user_pages_locked()
206 unsigned long start, unsigned long nr_pages, in __get_user_pages_unlocked() argument
211 ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages, in __get_user_pages_unlocked()
[all …]
mempolicy.c:590 static int queue_pages_test_walk(unsigned long start, unsigned long end, in queue_pages_test_walk() argument
603 if (vma->vm_start > start) in queue_pages_test_walk()
604 start = vma->vm_start; in queue_pages_test_walk()
618 change_prot_numa(vma, start, endvma); in queue_pages_test_walk()
638 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, in queue_pages_range() argument
656 return walk_page_range(start, end, &queue_pages_walk); in queue_pages_range()
696 static int mbind_range(struct mm_struct *mm, unsigned long start, in mbind_range() argument
707 vma = find_vma(mm, start); in mbind_range()
708 if (!vma || vma->vm_start > start) in mbind_range()
712 if (start > vma->vm_start) in mbind_range()
[all …]
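
queue_pages_range() walks [start, end) collecting misplaced pages, and mbind_range() splits or merges VMAs so the new policy covers exactly the requested range; both serve the mbind(2) syscall. A hedged userspace sketch via libnuma's wrapper (link with -lnuma; binding to node 0 assumes that node exists):

#include <numaif.h>   /* mbind(); link with -lnuma */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 8 * 4096;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    /* Bind the range to node 0; the kernel side is the
     * queue_pages_test_walk()/mbind_range() code above. */
    unsigned long nodemask = 1UL << 0;
    if (mbind(p, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask), 0))
        perror("mbind");

    munmap(p, len);
    return 0;
}
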
truncate.c:221 pgoff_t start; /* inclusive */ in truncate_inode_pages_range() local
244 start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; in truncate_inode_pages_range()
256 index = start; in truncate_inode_pages_range()
290 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range()
293 if (start > end) { in truncate_inode_pages_range()
325 if (start >= end) in truncate_inode_pages_range()
328 index = start; in truncate_inode_pages_range()
334 if (index == start) in truncate_inode_pages_range()
337 index = start; in truncate_inode_pages_range()
340 if (index == start && indices[0] >= end) { in truncate_inode_pages_range()
[all …]
mmu_notifier.c:110 unsigned long start, in __mmu_notifier_clear_flush_young() argument
119 young |= mn->ops->clear_flush_young(mn, mm, start, end); in __mmu_notifier_clear_flush_young()
127 unsigned long start, in __mmu_notifier_clear_young() argument
136 young |= mn->ops->clear_young(mn, mm, start, end); in __mmu_notifier_clear_young()
191 unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range_start() argument
199 mn->ops->invalidate_range_start(mn, mm, start, end); in __mmu_notifier_invalidate_range_start()
206 unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range_end() argument
222 mn->ops->invalidate_range(mn, mm, start, end); in __mmu_notifier_invalidate_range_end()
224 mn->ops->invalidate_range_end(mn, mm, start, end); in __mmu_notifier_invalidate_range_end()
231 unsigned long start, unsigned long end) in __mmu_notifier_invalidate_range() argument
[all …]
vmalloc.c:185 static int vmap_page_range_noflush(unsigned long start, unsigned long end, in vmap_page_range_noflush() argument
190 unsigned long addr = start; in vmap_page_range_noflush()
206 static int vmap_page_range(unsigned long start, unsigned long end, in vmap_page_range() argument
211 ret = vmap_page_range_noflush(start, end, prot, pages); in vmap_page_range()
212 flush_cache_vmap(start, end); in vmap_page_range()
536 static void vmap_debug_free_range(unsigned long start, unsigned long end) in vmap_debug_free_range() argument
552 vunmap_page_range(start, end); in vmap_debug_free_range()
553 flush_tlb_kernel_range(start, end); in vmap_debug_free_range()
606 static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, in __purge_vmap_area_lazy() argument
632 if (va->va_start < *start) in __purge_vmap_area_lazy()
[all …]
memblock.c:124 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, in __memblock_find_range_bottom_up() argument
132 this_start = clamp(this_start, start, end); in __memblock_find_range_bottom_up()
133 this_end = clamp(this_end, start, end); in __memblock_find_range_bottom_up()
158 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, in __memblock_find_range_top_down() argument
167 this_start = clamp(this_start, start, end); in __memblock_find_range_top_down()
168 this_end = clamp(this_end, start, end); in __memblock_find_range_top_down()
196 phys_addr_t align, phys_addr_t start, in memblock_find_in_range_node() argument
204 start = max_t(phys_addr_t, start, PAGE_SIZE); in memblock_find_in_range_node()
205 end = max(start, end); in memblock_find_in_range_node()
208 return __memblock_find_range_bottom_up(start, end, size, align, in memblock_find_in_range_node()
[all …]
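
Both memblock search directions share one idea: clamp each free region to the caller's [start, end) window, then test whether an aligned block of the requested size fits. A standalone sketch of the top-down variant (the free-region table and window values are invented):

#include <stdio.h>

struct region { unsigned long start, end; };

static unsigned long clampv(unsigned long v, unsigned long lo, unsigned long hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* Visit free regions from the top down; return the highest aligned
 * base that fits, or 0 for "not found", as memblock does. */
static unsigned long find_top_down(const struct region *free_tbl, int n,
                                   unsigned long start, unsigned long end,
                                   unsigned long size, unsigned long align)
{
    for (int i = n - 1; i >= 0; i--) {
        unsigned long this_start = clampv(free_tbl[i].start, start, end);
        unsigned long this_end = clampv(free_tbl[i].end, start, end);

        if (this_end < size)
            continue;
        unsigned long cand = (this_end - size) & ~(align - 1); /* round_down */
        if (cand >= this_start)
            return cand;
    }
    return 0;
}

int main(void)
{
    struct region free_tbl[] = { { 0x1000, 0x8000 }, { 0x10000, 0x20000 } };
    printf("candidate: 0x%lx\n",
           find_top_down(free_tbl, 2, 0x0, 0x18000, 0x2000, 0x1000));
    return 0; /* prints 0x16000: top region clamped to 0x18000, minus size */
}
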
sparse-vmemmap.c:91 unsigned long start, unsigned long end) in vmemmap_verify() argument
98 start, end - 1); in vmemmap_verify()
151 int __meminit vmemmap_populate_basepages(unsigned long start, in vmemmap_populate_basepages() argument
154 unsigned long addr = start; in vmemmap_populate_basepages()
181 unsigned long start; in sparse_mem_map_populate() local
186 start = (unsigned long)map; in sparse_mem_map_populate()
189 if (vmemmap_populate(start, end, nid)) in sparse_mem_map_populate()
kmemleak.c:133 unsigned long start; member
668 unsigned long start, end; in delete_object_part() local
686 start = object->pointer; in delete_object_part()
688 if (ptr > start) in delete_object_part()
689 create_object(start, ptr - start, object->min_count, in delete_object_part()
782 area->start = ptr; in add_scan_area()
1169 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); in scan_block() local
1174 for (ptr = start; ptr < end; ptr++) { in scan_block()
1238 static void scan_large_block(void *start, void *end) in scan_large_block() argument
1242 while (start < end) { in scan_large_block()
[all …]
page_ext.c:280 unsigned long start, end, pfn; in online_page_ext() local
283 start = SECTION_ALIGN_DOWN(start_pfn); in online_page_ext()
296 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { in online_page_ext()
305 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in online_page_ext()
314 unsigned long start, end, pfn; in offline_page_ext() local
316 start = SECTION_ALIGN_DOWN(start_pfn); in offline_page_ext()
319 for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) in offline_page_ext()
mmap.c:76 unsigned long start, unsigned long end);
754 int vma_adjust(struct vm_area_struct *vma, unsigned long start, in vma_adjust() argument
834 vma_adjust_trans_huge(vma, start, end, adjust_next); in vma_adjust()
855 if (start != vma->vm_start) { in vma_adjust()
856 vma->vm_start = start; in vma_adjust()
2442 unsigned long start; in find_extend_vma() local
2455 start = vma->vm_start; in find_extend_vma()
2459 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2497 unsigned long start, unsigned long end) in unmap_region() argument
2503 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
[all …]
cma_debug.c:54 unsigned long start, end = 0; in cma_maxchunk_get() local
59 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end); in cma_maxchunk_get()
60 if (start >= bitmap_maxno) in cma_maxchunk_get()
62 end = find_next_bit(cma->bitmap, bitmap_maxno, start); in cma_maxchunk_get()
63 maxchunk = max(end - start, maxchunk); in cma_maxchunk_get()
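
cma_maxchunk_get() measures fragmentation of a CMA area: alternate between the next free bit and the next allocated bit in the bitmap, tracking the longest free run. The same loop over a toy bitmap, with a byte-wise stand-in for find_next_bit()/find_next_zero_bit():

#include <stdio.h>

/* Next bit >= off whose value is val, or size if none. */
static unsigned long next_bit(const unsigned char *map, unsigned long size,
                              unsigned long off, int val)
{
    while (off < size && ((map[off / 8] >> (off % 8)) & 1) != val)
        off++;
    return off;
}

int main(void)
{
    /* 24 bits of state: 1 = allocated, 0 = free. */
    unsigned char map[3] = { 0x0f, 0x00, 0xf0 };
    unsigned long size = 24, maxchunk = 0, start, end = 0;

    for (;;) {
        start = next_bit(map, size, end, 0);   /* find_next_zero_bit() */
        if (start >= size)
            break;
        end = next_bit(map, size, start, 1);   /* find_next_bit() */
        if (end - start > maxchunk)
            maxchunk = end - start;
    }
    printf("max free chunk: %lu pages\n", maxchunk); /* 16 here */
    return 0;
}
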
/mm/kasan/
kasan.c:226 static __always_inline unsigned long bytes_is_zero(const u8 *start, in bytes_is_zero() argument
230 if (unlikely(*start)) in bytes_is_zero()
231 return (unsigned long)start; in bytes_is_zero()
232 start++; in bytes_is_zero()
239 static __always_inline unsigned long memory_is_zero(const void *start, in memory_is_zero() argument
244 unsigned int prefix = (unsigned long)start % 8; in memory_is_zero()
246 if (end - start <= 16) in memory_is_zero()
247 return bytes_is_zero(start, end - start); in memory_is_zero()
251 ret = bytes_is_zero(start, prefix); in memory_is_zero()
254 start += prefix; in memory_is_zero()
[all …]
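
memory_is_zero() is a word-at-a-time zero check: byte-compare the unaligned prefix and tail, scan the aligned middle eight bytes at a time, and fall back to bytes_is_zero() to pinpoint the offender. A portable userspace reconstruction (the memcpy load stands in for the kernel's direct aligned access):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* First nonzero byte in [p, p + n), or NULL, like bytes_is_zero(). */
static const unsigned char *bytes_nonzero(const unsigned char *p, size_t n)
{
    for (; n; n--, p++)
        if (*p)
            return p;
    return NULL;
}

static const void *memory_nonzero(const void *s, size_t n)
{
    const unsigned char *p = s;
    size_t prefix = (uintptr_t)p % 8;
    const unsigned char *bad;

    if (n <= 16)                       /* short ranges: bytes only */
        return bytes_nonzero(p, n);

    if (prefix) {                      /* unaligned head */
        prefix = 8 - prefix;
        if ((bad = bytes_nonzero(p, prefix)))
            return bad;
        p += prefix;
        n -= prefix;
    }
    for (; n >= 8; p += 8, n -= 8) {   /* aligned middle, one word at a time */
        uint64_t w;
        memcpy(&w, p, sizeof(w));
        if (w)
            return bytes_nonzero(p, 8);
    }
    return bytes_nonzero(p, n);        /* tail */
}

int main(void)
{
    unsigned char buf[100] = { 0 };
    buf[73] = 1;
    printf("nonzero at offset %td\n",
           (const unsigned char *)memory_nonzero(buf, sizeof buf) - buf);
    return 0;
}
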
