
Searched refs:next (Results 1 – 25 of 47) sorted by relevance


/mm/kasan/
init.c
117 unsigned long next; in zero_pmd_populate() local
120 next = pmd_addr_end(addr, end); in zero_pmd_populate()
140 zero_pte_populate(pmd, addr, next); in zero_pmd_populate()
141 } while (pmd++, addr = next, addr != end); in zero_pmd_populate()
150 unsigned long next; in zero_pud_populate() local
153 next = pud_addr_end(addr, end); in zero_pud_populate()
177 zero_pmd_populate(pud, addr, next); in zero_pud_populate()
178 } while (pud++, addr = next, addr != end); in zero_pud_populate()
187 unsigned long next; in zero_p4d_populate() local
190 next = p4d_addr_end(addr, end); in zero_p4d_populate()
[all …]
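
Every init.c hit above (and the pagewalk.c, mprotect.c, memory.c, vmalloc.c, gup.c, swapfile.c and madvise.c hits below) is the kernel's canonical range-walk loop: next is clamped to the lesser of the following table boundary and the end of the range, one entry's span is handled, and the do/while advances with addr = next. A minimal compilable sketch of the idiom; addr_end(), BOUNDARY_SIZE and walk_range() are invented stand-ins for pmd_addr_end() and PMD_SIZE, not kernel API.

#include <stdio.h>

#define BOUNDARY_SIZE  (1UL << 21)                    /* 2 MiB, like PMD_SIZE */
#define BOUNDARY_MASK  (~(BOUNDARY_SIZE - 1))

static unsigned long addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + BOUNDARY_SIZE) & BOUNDARY_MASK;
        return boundary < end ? boundary : end;       /* never overshoot end */
}

static void walk_range(unsigned long addr, unsigned long end)
{
        unsigned long next;

        do {
                next = addr_end(addr, end);           /* one table entry's span */
                printf("entry covers [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);           /* advance to the boundary */
}

int main(void)
{
        walk_range(0x1ff000, 0x601000);               /* crosses three boundaries */
        return 0;
}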
quarantine.c
67 q->tail->next = qlink; in qlist_put()
69 qlink->next = NULL; in qlist_put()
84 to->tail->next = from->head; in qlist_move_all()
165 struct qlist_node *next = qlink->next; in qlist_free_all() local
168 qlink = next; in qlist_free_all()
276 struct qlist_node *next = curr->next; in qlist_move_cache() local
284 curr = next; in qlist_move_cache()
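
The quarantine.c hits show a singly linked queue with head and tail pointers: qlist_put() appends in O(1) through tail->next, and the free/move loops keep a local next because the current node is destroyed before the cursor can advance. A standalone sketch of both patterns; qnode and qlist are pared-down stand-ins for qlist_node and the quarantine lists.

#include <stdlib.h>

struct qnode {                          /* stands in for struct qlist_node */
        struct qnode *next;
};

struct qlist {
        struct qnode *head;
        struct qnode *tail;
};

/* Append at the tail in O(1), as qlist_put() does. */
static void qlist_put(struct qlist *q, struct qnode *n)
{
        if (q->tail)
                q->tail->next = n;
        else
                q->head = n;
        q->tail = n;
        n->next = NULL;
}

/* Free every node; the next pointer must be read *before* free(),
 * which is exactly why qlist_free_all() keeps a local 'next'. */
static void qlist_free_all(struct qlist *q)
{
        struct qnode *n = q->head;

        while (n) {
                struct qnode *next = n->next;
                free(n);
                n = next;
        }
        q->head = q->tail = NULL;
}

int main(void)
{
        struct qlist q = { 0 };

        for (int i = 0; i < 3; i++)
                qlist_put(&q, malloc(sizeof(struct qnode)));
        qlist_free_all(&q);
        return 0;
}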
/mm/
pagewalk.c
33 unsigned long next; in walk_pmd_range() local
40 next = pmd_addr_end(addr, end); in walk_pmd_range()
43 err = ops->pte_hole(addr, next, walk); in walk_pmd_range()
53 err = ops->pmd_entry(pmd, addr, next, walk); in walk_pmd_range()
67 err = walk_pte_range(pmd, addr, next, walk); in walk_pmd_range()
70 } while (pmd++, addr = next, addr != end); in walk_pmd_range()
79 unsigned long next; in walk_pud_range() local
86 next = pud_addr_end(addr, end); in walk_pud_range()
89 err = ops->pte_hole(addr, next, walk); in walk_pud_range()
99 err = ops->pud_entry(pud, addr, next, walk); in walk_pud_range()
[all …]
mmap.c
173 struct vm_area_struct *next = vma->vm_next; in remove_vma() local
182 return next; in remove_vma()
192 struct vm_area_struct *next; in SYSCALL_DEFINE1() local
262 next = find_vma(mm, oldbrk); in SYSCALL_DEFINE1()
263 if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) in SYSCALL_DEFINE1()
685 struct vm_area_struct *next; in __vma_unlink_common() local
688 next = vma->vm_next; in __vma_unlink_common()
690 prev->vm_next = next; in __vma_unlink_common()
694 prev->vm_next = next; in __vma_unlink_common()
696 mm->mmap = next; in __vma_unlink_common()
[all …]
mprotect.c
169 unsigned long next; in change_pmd_range() local
180 next = pmd_addr_end(addr, end); in change_pmd_range()
183 goto next; in change_pmd_range()
194 if (next - addr != HPAGE_PMD_SIZE) { in change_pmd_range()
207 goto next; in change_pmd_range()
212 this_pages = change_pte_range(vma, pmd, addr, next, newprot, in change_pmd_range()
215 next: in change_pmd_range()
217 } while (pmd++, addr = next, addr != end); in change_pmd_range()
232 unsigned long next; in change_pud_range() local
237 next = pud_addr_end(addr, end); in change_pud_range()
[all …]
mmu_gather.c
21 if (batch->next) { in tlb_next_batch()
22 tlb->active = batch->next; in tlb_next_batch()
34 batch->next = NULL; in tlb_next_batch()
38 tlb->active->next = batch; in tlb_next_batch()
48 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { in tlb_batch_pages_flush()
57 struct mmu_gather_batch *batch, *next; in tlb_batch_list_free() local
59 for (batch = tlb->local.next; batch; batch = next) { in tlb_batch_list_free()
60 next = batch->next; in tlb_batch_list_free()
63 tlb->local.next = NULL; in tlb_batch_list_free()
216 tlb->local.next = NULL; in tlb_gather_mmu()
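
tlb_next_batch() shows a grow-only chain with an active cursor: if a batch beyond active already exists (left over from an earlier flush cycle) it is reused, otherwise a new batch is allocated and linked at the end. A simplified userspace sketch; batch, gather and next_batch() are invented names, and calloc() stands in for the kernel's page allocation.

#include <stdlib.h>

#define BATCH_CAPACITY 64

struct batch {                          /* stands in for mmu_gather_batch */
        struct batch *next;
        unsigned int nr;                /* slots used */
        void *pages[BATCH_CAPACITY];
};

struct gather {
        struct batch local;             /* first batch is embedded */
        struct batch *active;           /* batch currently being filled */
};

/* Move to the next batch, reusing a previously allocated one when the
 * chain already extends past 'active', the same fast path tlb_next_batch()
 * takes when batch->next is non-NULL. */
static int next_batch(struct gather *g)
{
        struct batch *b = g->active;

        if (b->next) {                  /* reuse a batch kept from last cycle */
                g->active = b->next;
                return 1;
        }

        b = calloc(1, sizeof(*b));      /* otherwise grow the chain */
        if (!b)
                return 0;
        g->active->next = b;
        g->active = b;
        return 1;
}

int main(void)
{
        struct gather g = { .active = &g.local };

        next_batch(&g);                 /* allocates the second batch */
        return 0;
}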
slob.c
146 static void set_slob(slob_t *s, slobidx_t size, slob_t *next) in set_slob() argument
149 slobidx_t offset = next - base; in set_slob()
174 slobidx_t next; in slob_next() local
177 next = -s[0].units; in slob_next()
179 next = s[1].units; in slob_next()
180 return base+next; in slob_next()
261 slob_t *next; in slob_page_alloc() local
264 next = slob_next(cur); in slob_page_alloc()
265 set_slob(aligned, avail - delta, next); in slob_page_alloc()
272 next = slob_next(cur); in slob_page_alloc()
[all …]
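
SLOB threads its free list through the free blocks themselves, storing next as an offset in units from the page base: a multi-unit block keeps its size in s[0] and the next offset in s[1], while a one-unit block has room for only one field and stores the offset negated. A sketch reconstructed from the lines above; unlike the kernel, base is passed explicitly here rather than derived from the page mask, and the multi-unit layout is inferred from the snippet.

#include <stdint.h>

typedef int16_t slobidx_t;

typedef union {
        slobidx_t units;
} slob_t;

/* A one-unit free block encodes its successor as a negated offset in
 * s[0]; anything larger keeps its size in s[0] and the offset in s[1]. */
static slob_t *slob_next(slob_t *s, slob_t *base)
{
        slobidx_t next;

        if (s[0].units < 0)             /* one-unit block: negated offset */
                next = -s[0].units;
        else
                next = s[1].units;      /* normal block: offset in 2nd unit */
        return base + next;
}

static void set_slob(slob_t *s, slob_t *base, slobidx_t size, slob_t *next)
{
        slobidx_t offset = next - base;

        if (size > 1) {
                s[0].units = size;
                s[1].units = offset;
        } else {
                s[0].units = -offset;
        }
}

int main(void)
{
        slob_t page[32] = { 0 };

        set_slob(&page[0], page, 4, &page[8]);   /* 4-unit block, next at 8 */
        return slob_next(&page[0], page) == &page[8] ? 0 : 1;
}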
memory.c
228 unsigned long next; in free_pmd_range() local
234 next = pmd_addr_end(addr, end); in free_pmd_range()
238 } while (pmd++, addr = next, addr != end); in free_pmd_range()
262 unsigned long next; in free_pud_range() local
268 next = pud_addr_end(addr, end); in free_pud_range()
271 free_pmd_range(tlb, pud, addr, next, floor, ceiling); in free_pud_range()
272 } while (pud++, addr = next, addr != end); in free_pud_range()
296 unsigned long next; in free_p4d_range() local
302 next = p4d_addr_end(addr, end); in free_p4d_range()
305 free_pud_range(tlb, p4d, addr, next, floor, ceiling); in free_p4d_range()
[all …]
dmapool.c
69 char *next; in show_pools() local
73 next = buf; in show_pools()
76 temp = scnprintf(next, size, "poolinfo - 0.1\n"); in show_pools()
78 next += temp; in show_pools()
93 temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n", in show_pools()
98 next += temp; in show_pools()
211 unsigned int next = offset + pool->size; in pool_initialise_page() local
212 if (unlikely((next + pool->size) >= next_boundary)) { in pool_initialise_page()
213 next = next_boundary; in pool_initialise_page()
216 *(int *)(page->vaddr + offset) = next; in pool_initialise_page()
[all …]
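
pool_initialise_page() builds an in-page free list by writing, at the start of each free block, the offset of the next free block; allocation then just reads the offset stored at the current head. A simplified standalone sketch that omits the kernel's next_boundary clamping seen at line 212; pool_page, page_init() and page_alloc() are hypothetical names.

#include <stdlib.h>

struct pool_page {
        char *vaddr;                    /* stands in for page->vaddr */
        size_t page_size;
        size_t block_size;
        size_t free_off;                /* offset of first free block */
};

static void page_init(struct pool_page *p)
{
        size_t offset = 0;

        p->free_off = 0;
        do {
                size_t next = offset + p->block_size;
                *(size_t *)(p->vaddr + offset) = next;   /* chain to successor */
                offset = next;
        } while (offset + p->block_size <= p->page_size);
}

static void *page_alloc(struct pool_page *p)
{
        if (p->free_off + p->block_size > p->page_size)
                return NULL;                             /* page exhausted */
        void *block = p->vaddr + p->free_off;
        p->free_off = *(size_t *)block;                  /* pop the head */
        return block;
}

int main(void)
{
        static _Alignas(size_t) char page[4096];
        struct pool_page p = { page, sizeof(page), 256, 0 };

        page_init(&p);
        while (page_alloc(&p))
                ;                                        /* drain all 16 blocks */
        return 0;
}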
vmalloc.c
77 unsigned long next; in vunmap_pmd_range() local
81 next = pmd_addr_end(addr, end); in vunmap_pmd_range()
86 vunmap_pte_range(pmd, addr, next); in vunmap_pmd_range()
87 } while (pmd++, addr = next, addr != end); in vunmap_pmd_range()
93 unsigned long next; in vunmap_pud_range() local
97 next = pud_addr_end(addr, end); in vunmap_pud_range()
102 vunmap_pmd_range(pud, addr, next); in vunmap_pud_range()
103 } while (pud++, addr = next, addr != end); in vunmap_pud_range()
109 unsigned long next; in vunmap_p4d_range() local
113 next = p4d_addr_end(addr, end); in vunmap_p4d_range()
[all …]
gup_benchmark.c
26 unsigned long i, nr_pages, addr, next; in __gup_benchmark_ioctl() local
42 for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) { in __gup_benchmark_ioctl()
46 next = addr + nr * PAGE_SIZE; in __gup_benchmark_ioctl()
47 if (next > gup->addr + gup->size) { in __gup_benchmark_ioctl()
48 next = gup->addr + gup->size; in __gup_benchmark_ioctl()
49 nr = (next - addr) / PAGE_SIZE; in __gup_benchmark_ioctl()
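
__gup_benchmark_ioctl() walks the requested range in fixed-size chunks and clamps the final one so nr matches the bytes actually left. The loop shape, as a compilable sketch with invented constants:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define CHUNK_PAGES 512UL

int main(void)
{
        unsigned long start = 0x10000;
        unsigned long size = 1280 * PAGE_SIZE;   /* 2.5 chunks' worth */
        unsigned long addr, next, nr;

        for (addr = start; addr < start + size; addr = next) {
                nr = CHUNK_PAGES;
                next = addr + nr * PAGE_SIZE;
                if (next > start + size) {               /* clamp last chunk */
                        next = start + size;
                        nr = (next - addr) / PAGE_SIZE;
                }
                printf("pin %lu pages at %#lx\n", nr, addr);
        }
        return 0;
}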
gup.c
2032 unsigned long next; in gup_huge_pd() local
2036 next = hugepte_addr_end(addr, end, sz); in gup_huge_pd()
2039 } while (ptep++, addr = next, addr != end); in gup_huge_pd()
2175 unsigned long next; in gup_pmd_range() local
2182 next = pmd_addr_end(addr, end); in gup_pmd_range()
2196 if (!gup_huge_pmd(pmd, pmdp, addr, next, flags, in gup_pmd_range()
2206 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2208 } else if (!gup_pte_range(pmd, addr, next, flags, pages, nr)) in gup_pmd_range()
2210 } while (pmdp++, addr = next, addr != end); in gup_pmd_range()
2218 unsigned long next; in gup_pud_range() local
[all …]
swapfile.c
612 cluster->next = cluster_next(&cluster->index) * in scan_swap_map_try_ssd_cluster()
632 tmp = cluster->next; in scan_swap_map_try_ssd_cluster()
652 cluster->next = tmp + 1; in scan_swap_map_try_ssd_cluster()
995 struct swap_info_struct *si, *next; in get_swap_pages() local
1019 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in get_swap_pages()
1065 if (plist_node_empty(&next->avail_lists[node])) in get_swap_pages()
1983 unsigned long next; in unuse_pmd_range() local
1989 next = pmd_addr_end(addr, end); in unuse_pmd_range()
1992 ret = unuse_pte_range(vma, pmd, addr, next, type, in unuse_pmd_range()
1996 } while (pmd++, addr = next, addr != end); in unuse_pmd_range()
[all …]
util.c
276 struct vm_area_struct *next; in __vma_link_list() local
280 next = prev->vm_next; in __vma_link_list()
285 next = rb_entry(rb_parent, in __vma_link_list()
288 next = NULL; in __vma_link_list()
290 vma->vm_next = next; in __vma_link_list()
291 if (next) in __vma_link_list()
292 next->vm_prev = vma; in __vma_link_list()
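
__vma_link_list() inserts a VMA after prev in the address-ordered doubly linked list, patching vm_next and vm_prev in both directions. A reduced sketch that replaces mm->mmap and the rbtree-derived successor with a plain head pointer; struct vma here is a pared-down vm_area_struct.

#include <stddef.h>

struct vma {
        struct vma *vm_next;
        struct vma *vm_prev;
};

static void vma_link_list(struct vma **head, struct vma *vma, struct vma *prev)
{
        struct vma *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;   /* splice in after prev */
                prev->vm_next = vma;
        } else {
                next = *head;           /* inserting at the front */
                *head = vma;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

int main(void)
{
        struct vma a = { 0 }, b = { 0 };
        struct vma *head = NULL;

        vma_link_list(&head, &a, NULL);      /* a becomes the first entry */
        vma_link_list(&head, &b, &a);        /* b linked after a */
        return head == &a && a.vm_next == &b && b.vm_prev == &a ? 0 : 1;
}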
kmemleak.c
1303 void *next; in scan_large_block() local
1306 next = min(start + MAX_SCAN_SIZE, end); in scan_large_block()
1307 scan_block(start, next, NULL); in scan_large_block()
1308 start = next; in scan_large_block()
1337 void *next; in scan_object() local
1340 next = min(start + MAX_SCAN_SIZE, end); in scan_object()
1341 scan_block(start, next, object); in scan_object()
1343 start = next; in scan_object()
1373 object = list_entry(gray_list.next, typeof(*object), gray_list); in scan_gray_list()
1381 tmp = list_entry(object->gray_list.next, typeof(*object), in scan_gray_list()
[all …]
frontswap.c
33 for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)
140 ops->next = frontswap_ops; in frontswap_register_ops()
141 } while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next); in frontswap_register_ops()
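
frontswap_register_ops() pushes onto a singly linked list without a lock: it snapshots the head into ops->next, then publishes itself with cmpxchg, retrying if another CPU moved the head in between. The same pattern written with C11 atomics; ops_head and register_ops() are stand-ins, not the kernel's names.

#include <stdatomic.h>

struct ops {                            /* stands in for frontswap_ops */
        struct ops *next;
};

static _Atomic(struct ops *) ops_head;

static void register_ops(struct ops *ops)
{
        struct ops *head = atomic_load(&ops_head);

        do {
                ops->next = head;       /* link behind the current head */
        } while (!atomic_compare_exchange_weak(&ops_head, &head, ops));
}

int main(void)
{
        static struct ops a, b;

        register_ops(&a);
        register_ops(&b);               /* head is now b, b.next == &a */
        return atomic_load(&ops_head) == &b ? 0 : 1;
}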
mempolicy.c
734 struct vm_area_struct *next; in mbind_range() local
750 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
751 next = vma->vm_next; in mbind_range()
766 next = vma->vm_next; in mbind_range()
1829 unsigned next; in interleave_nodes() local
1832 next = next_node_in(me->il_prev, policy->v.nodes); in interleave_nodes()
1833 if (next < MAX_NUMNODES) in interleave_nodes()
1834 me->il_prev = next; in interleave_nodes()
1835 return next; in interleave_nodes()
2519 struct rb_node *next = rb_next(&n->nd); in shared_policy_replace() local
[all …]
backing-dev.c
587 blkcg_cgwb_list->next && memcg_cgwb_list->next) { in cgwb_create()
746 struct bdi_writeback *wb, *next; in wb_memcg_offline() local
749 list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node) in wb_memcg_offline()
751 memcg_cgwb_list->next = NULL; /* prevent new wb's */ in wb_memcg_offline()
763 struct bdi_writeback *wb, *next; in wb_blkcg_offline() local
766 list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node) in wb_blkcg_offline()
768 blkcg->cgwb_list.next = NULL; /* prevent new wb's */ in wb_blkcg_offline()
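
The wb_memcg_offline()/wb_blkcg_offline() hits use list_for_each_entry_safe() because each iteration destroys the current entry, so the iterator must capture next before the body runs. The open-coded equivalent on a plain singly linked list, with free() standing in for the offline teardown:

#include <stdlib.h>

struct wb {                                  /* pared-down bdi_writeback */
        struct wb *next;
};

static void offline_all(struct wb **head)
{
        struct wb *wb, *next;

        for (wb = *head; wb; wb = next) {
                next = wb->next;             /* saved before wb is freed */
                free(wb);
        }
        *head = NULL;                        /* prevent new wb's */
}

int main(void)
{
        struct wb *head = NULL;

        for (int i = 0; i < 3; i++) {        /* build a three-entry list */
                struct wb *w = malloc(sizeof(*w));
                w->next = head;
                head = w;
        }
        offline_all(&head);
        return 0;
}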
mremap.c
245 unsigned long extent, next, old_end; in move_page_tables() local
258 next = (old_addr + PMD_SIZE) & PMD_MASK; in move_page_tables()
260 extent = next - old_addr; in move_page_tables()
306 next = (new_addr + PMD_SIZE) & PMD_MASK; in move_page_tables()
307 if (extent > next - new_addr) in move_page_tables()
308 extent = next - new_addr; in move_page_tables()
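
move_page_tables() never copies across a PMD boundary on either side: next is rounded up to the following boundary for the old address, the extent is taken from that, and then clamped again against the new address's boundary. A sketch of that clamping, equivalent up to the kernel's overflow handling; clamp_extent() is an invented helper.

#define PMD_SIZE (1UL << 21)
#define PMD_MASK (~(PMD_SIZE - 1))

static unsigned long clamp_extent(unsigned long old_addr,
                                  unsigned long new_addr,
                                  unsigned long old_end)
{
        unsigned long next, extent;

        next = (old_addr + PMD_SIZE) & PMD_MASK;     /* next boundary above old */
        if (next > old_end)
                next = old_end;
        extent = next - old_addr;

        next = (new_addr + PMD_SIZE) & PMD_MASK;     /* and above new */
        if (extent > next - new_addr)
                extent = next - new_addr;
        return extent;
}

int main(void)
{
        /* old side hits its boundary first: extent is clamped to it */
        return clamp_extent(0x1ff000, 0x300000, 0x800000) == 0x1000 ? 0 : 1;
}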
madvise.c
320 unsigned long next = pmd_addr_end(addr, end); in madvise_cold_or_pageout_pte_range() local
338 if (next - addr != HPAGE_PMD_SIZE) { in madvise_cold_or_pageout_pte_range()
567 unsigned long next; in madvise_free_pte_range() local
569 next = pmd_addr_end(addr, end); in madvise_free_pte_range()
571 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) in madvise_free_pte_range()
572 goto next; in madvise_free_pte_range()
685 next: in madvise_free_pte_range()
oom_kill.c
315 goto next; in oom_evaluate_task()
319 goto next; in oom_evaluate_task()
329 goto next; in oom_evaluate_task()
344 goto next; in oom_evaluate_task()
352 next: in oom_evaluate_task()
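
Not every hit is a pointer: in oom_evaluate_task() (and in the mprotect.c and madvise.c entries above), next is a label used as a structured continue so that per-iteration cleanup still runs. The shape in miniature, with invented names:

#include <stdio.h>

int main(void)
{
        for (int i = 0; i < 10; i++) {
                if (i % 3 == 0)
                        goto next;      /* skip the work, not the cleanup */
                printf("evaluating %d\n", i);
        next:
                printf("cleanup for %d\n", i);
        }
        return 0;
}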
memblock.c
518 struct memblock_region *next = &type->regions[i + 1]; in memblock_merge_regions() local
520 if (this->base + this->size != next->base || in memblock_merge_regions()
522 memblock_get_region_node(next) || in memblock_merge_regions()
523 this->flags != next->flags) { in memblock_merge_regions()
524 BUG_ON(this->base + this->size > next->base); in memblock_merge_regions()
529 this->size += next->size; in memblock_merge_regions()
531 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); in memblock_merge_regions()
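
memblock_merge_regions() folds a region into its predecessor when the two are physically adjacent and share flags, then memmoves the array tail down one slot. A standalone sketch that keeps the adjacency and flags checks but drops the kernel's NUMA-node comparison; struct region is a pared-down memblock_region.

#include <string.h>

struct region {
        unsigned long base;
        unsigned long size;
        int flags;
};

static void merge_regions(struct region *regions, unsigned long *cnt)
{
        unsigned long i = 0;

        while (i + 1 < *cnt) {
                struct region *this = &regions[i];
                struct region *next = &regions[i + 1];

                if (this->base + this->size != next->base ||
                    this->flags != next->flags) {
                        i++;            /* not mergeable, move on */
                        continue;
                }

                this->size += next->size;
                /* close the gap left by 'next' */
                memmove(next, next + 1, (*cnt - (i + 2)) * sizeof(*next));
                (*cnt)--;
        }
}

int main(void)
{
        struct region r[3] = {
                { 0x1000, 0x1000, 0 },  /* adjacent to the next one */
                { 0x2000, 0x2000, 0 },
                { 0x8000, 0x1000, 0 },  /* gap: stays separate */
        };
        unsigned long cnt = 3;

        merge_regions(r, &cnt);
        return cnt == 2 && r[0].size == 0x3000 ? 0 : 1;
}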
ksm.c
639 if (rmap_item->hlist.next) in remove_node_from_stable_tree()
940 struct stable_node *stable_node, *next; in remove_all_stable_nodes() local
956 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { in remove_all_stable_nodes()
972 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, in unmerge_and_remove_all_rmap_items()
995 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, in unmerge_and_remove_all_rmap_items()
1422 BUG_ON(stable_node->hlist.first->next); in stable_node_dup()
2020 if (rmap_item->hlist.next) in stable_tree_append()
2248 struct stable_node *stable_node, *next; in scan_get_next_rmap_item() local
2251 list_for_each_entry_safe(stable_node, next, in scan_get_next_rmap_item()
2265 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); in scan_get_next_rmap_item()
[all …]
huge_memory.c
1704 pmd_t *pmd, unsigned long addr, unsigned long next) in madvise_free_huge_pmd() argument
1743 if (next - addr != HPAGE_PMD_SIZE) { in madvise_free_huge_pmd()
2409 struct vm_area_struct *next = vma->vm_next; in vma_adjust_trans_huge() local
2410 unsigned long nstart = next->vm_start; in vma_adjust_trans_huge()
2413 (nstart & HPAGE_PMD_MASK) >= next->vm_start && in vma_adjust_trans_huge()
2414 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) in vma_adjust_trans_huge()
2415 split_huge_pmd_address(next, nstart, false, NULL); in vma_adjust_trans_huge()
2916 LIST_HEAD(list), *pos, *next; in deferred_split_scan()
2927 list_for_each_safe(pos, next, &ds_queue->split_queue) { in deferred_split_scan()
2942 list_for_each_safe(pos, next, &list) { in deferred_split_scan()
[all …]
zbud.c
536 goto next; in zbud_reclaim_page()
541 goto next; in zbud_reclaim_page()
543 next: in zbud_reclaim_page()
