/mm/kasan/
init.c
    113  unsigned long next;  (in zero_pmd_populate(), local)
    116  next = pmd_addr_end(addr, end);  (in zero_pmd_populate())
    136  zero_pte_populate(pmd, addr, next);  (in zero_pmd_populate())
    137  } while (pmd++, addr = next, addr != end);  (in zero_pmd_populate())
    146  unsigned long next;  (in zero_pud_populate(), local)
    149  next = pud_addr_end(addr, end);  (in zero_pud_populate())
    173  zero_pmd_populate(pud, addr, next);  (in zero_pud_populate())
    174  } while (pud++, addr = next, addr != end);  (in zero_pud_populate())
    183  unsigned long next;  (in zero_p4d_populate(), local)
    186  next = p4d_addr_end(addr, end);  (in zero_p4d_populate())
    [all …]
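The init.c hits above, and the pagewalk.c, ioremap.c, memory.c, vmalloc.c and gup.c hits below, all follow the same range-walk idiom: next is clamped to the boundary of the current table entry and the do/while advances one entry per pass. The following is a minimal, self-contained userspace sketch of that idiom, not the kernel's code; PMD_SHIFT, walk_range() and the printf() body are illustrative assumptions.

#include <stdio.h>

#define PMD_SHIFT	21UL			/* assumed: 2 MiB entries */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Clamp the next PMD boundary to 'end'; the "- 1" keeps an end of 0 wrap-safe. */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary - 1 < end - 1 ? boundary : end;
}

static void walk_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		/* operate on the sub-range [addr, next) covered by one entry */
		printf("entry covers [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr != end);
}

int main(void)
{
	walk_range(0x1ff000UL, 0x601000UL);
	return 0;
}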
quarantine.c
    59   q->tail->next = qlink;  (in qlist_put())
    61   qlink->next = NULL;  (in qlist_put())
    76   to->tail->next = from->head;  (in qlist_move_all())
    163  struct qlist_node *next = qlink->next;  (in qlist_free_all(), local)
    166  qlink = next;  (in qlist_free_all())
    288  struct qlist_node *next = curr->next;  (in qlist_move_cache(), local)
    296  curr = next;  (in qlist_move_cache())
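The quarantine hits show a singly linked list with head/tail pointers: qlist_put() appends at the tail, and the drain loops read next before each node is released. A stripped-down, self-contained sketch of that pattern follows; struct qnode and struct qlist are simplified stand-ins for the kernel's qlist_node/qlist_head, and the accounting done by the real code is omitted.

#include <stdlib.h>

struct qnode {
	struct qnode *next;
};

struct qlist {
	struct qnode *head;
	struct qnode *tail;
};

/* Append at the tail, as in qlist_put(). */
static void qlist_put(struct qlist *q, struct qnode *qlink)
{
	if (!q->head)
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
}

/* Drain the list; 'next' is saved before the node is freed, as in qlist_free_all(). */
static void qlist_free_all(struct qlist *q)
{
	struct qnode *qlink = q->head;

	while (qlink) {
		struct qnode *next = qlink->next;

		free(qlink);
		qlink = next;
	}
	q->head = q->tail = NULL;
}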
/mm/
pagewalk.c
    65   unsigned long next;  (in walk_pmd_range(), local)
    73   next = pmd_addr_end(addr, end);  (in walk_pmd_range())
    76   err = ops->pte_hole(addr, next, depth, walk);  (in walk_pmd_range())
    89   err = ops->pmd_entry(pmd, addr, next, walk);  (in walk_pmd_range())
    111  err = walk_pte_range(pmd, addr, next, walk);  (in walk_pmd_range())
    114  } while (pmd++, addr = next, addr != end);  (in walk_pmd_range())
    123  unsigned long next;  (in walk_pud_range(), local)
    131  next = pud_addr_end(addr, end);  (in walk_pud_range())
    134  err = ops->pte_hole(addr, next, depth, walk);  (in walk_pud_range())
    143  err = ops->pud_entry(pud, addr, next, walk);  (in walk_pud_range())
    [all …]
ioremap.c
    111  unsigned long next;  (in ioremap_pmd_range(), local)
    117  next = pmd_addr_end(addr, end);  (in ioremap_pmd_range())
    119  if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {  (in ioremap_pmd_range())
    124  if (ioremap_pte_range(pmd, addr, next, phys_addr, prot, mask))  (in ioremap_pmd_range())
    126  } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);  (in ioremap_pmd_range())
    157  unsigned long next;  (in ioremap_pud_range(), local)
    163  next = pud_addr_end(addr, end);  (in ioremap_pud_range())
    165  if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot)) {  (in ioremap_pud_range())
    170  if (ioremap_pmd_range(pud, addr, next, phys_addr, prot, mask))  (in ioremap_pud_range())
    172  } while (pud++, phys_addr += (next - addr), addr = next, addr != end);  (in ioremap_pud_range())
    [all …]
mmap.c
    198  struct vm_area_struct *next = vma->vm_next;  (in remove_vma(), local)
    204  return next;  (in remove_vma())
    214  struct vm_area_struct *next;  (in SYSCALL_DEFINE1(), local)
    282  next = find_vma(mm, oldbrk);  (in SYSCALL_DEFINE1())
    283  if (next && newbrk + PAGE_SIZE > vm_start_gap(next))  (in SYSCALL_DEFINE1())
    783  struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;  (in __vma_adjust(), local)
    793  if (next)  (in __vma_adjust())
    794  vm_write_begin(next);  (in __vma_adjust())
    796  if (next && !insert) {  (in __vma_adjust())
    799  if (end >= next->vm_end) {  (in __vma_adjust())
    [all …]
mprotect.c
    217  unsigned long next;  (in change_pmd_range(), local)
    228  next = pmd_addr_end(addr, end);  (in change_pmd_range())
    240  goto next;  (in change_pmd_range())
    251  if (next - addr != HPAGE_PMD_SIZE) {  (in change_pmd_range())
    264  goto next;  (in change_pmd_range())
    269  this_pages = change_pte_range(vma, pmd, addr, next, newprot,  (in change_pmd_range())
    272  next:  (in change_pmd_range())
    274  } while (pmd++, addr = next, addr != end);  (in change_pmd_range())
    289  unsigned long next;  (in change_pud_range(), local)
    294  next = pud_addr_end(addr, end);  (in change_pud_range())
    [all …]
mmu_gather.c
    21   if (batch->next) {  (in tlb_next_batch())
    22   tlb->active = batch->next;  (in tlb_next_batch())
    34   batch->next = NULL;  (in tlb_next_batch())
    38   tlb->active->next = batch;  (in tlb_next_batch())
    48   for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {  (in tlb_batch_pages_flush())
    57   struct mmu_gather_batch *batch, *next;  (in tlb_batch_list_free(), local)
    59   for (batch = tlb->local.next; batch; batch = next) {  (in tlb_batch_list_free())
    60   next = batch->next;  (in tlb_batch_list_free())
    63   tlb->local.next = NULL;  (in tlb_batch_list_free())
    272  tlb->local.next = NULL;  (in tlb_gather_mmu())
slob.c
    146  static void set_slob(slob_t *s, slobidx_t size, slob_t *next)  (in set_slob(), argument)
    149  slobidx_t offset = next - base;  (in set_slob())
    174  slobidx_t next;  (in slob_next(), local)
    177  next = -s[0].units;  (in slob_next())
    179  next = s[1].units;  (in slob_next())
    180  return base+next;  (in slob_next())
    261  slob_t *next;  (in slob_page_alloc(), local)
    264  next = slob_next(cur);  (in slob_page_alloc())
    265  set_slob(aligned, avail - delta, next);  (in slob_page_alloc())
    272  next = slob_next(cur);  (in slob_page_alloc())
    [all …]
memory.c
    269  unsigned long next;  (in free_pmd_range(), local)
    275  next = pmd_addr_end(addr, end);  (in free_pmd_range())
    279  } while (pmd++, addr = next, addr != end);  (in free_pmd_range())
    303  unsigned long next;  (in free_pud_range(), local)
    309  next = pud_addr_end(addr, end);  (in free_pud_range())
    312  free_pmd_range(tlb, pud, addr, next, floor, ceiling);  (in free_pud_range())
    313  } while (pud++, addr = next, addr != end);  (in free_pud_range())
    337  unsigned long next;  (in free_p4d_range(), local)
    343  next = p4d_addr_end(addr, end);  (in free_p4d_range())
    346  free_pud_range(tlb, p4d, addr, next, floor, ceiling);  (in free_p4d_range())
    [all …]
vmalloc.c
    90   unsigned long next;  (in vunmap_pmd_range(), local)
    95   next = pmd_addr_end(addr, end);  (in vunmap_pmd_range())
    105  vunmap_pte_range(pmd, addr, next, mask);  (in vunmap_pmd_range())
    108  } while (pmd++, addr = next, addr != end);  (in vunmap_pmd_range())
    115  unsigned long next;  (in vunmap_pud_range(), local)
    120  next = pud_addr_end(addr, end);  (in vunmap_pud_range())
    130  vunmap_pmd_range(pud, addr, next, mask);  (in vunmap_pud_range())
    131  } while (pud++, addr = next, addr != end);  (in vunmap_pud_range())
    138  unsigned long next;  (in vunmap_p4d_range(), local)
    143  next = p4d_addr_end(addr, end);  (in vunmap_p4d_range())
    [all …]
page_reporting.c
    116  struct page *page, *next;  (in page_reporting_cycle(), local)
    146  list_for_each_entry_safe(page, next, list, lru) {  (in page_reporting_cycle())
    158  next = page;  (in page_reporting_cycle())
    165  next = page;  (in page_reporting_cycle())
    206  next = list_first_entry(list, struct page, lru);  (in page_reporting_cycle())
    214  if (&next->lru != list && !list_is_first(&next->lru, list))  (in page_reporting_cycle())
    215  list_rotate_to_front(&next->lru, list);  (in page_reporting_cycle())
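list_for_each_entry_safe() is the "safe" flavour of list iteration: next always holds the entry after the current one, so the current entry may be unlinked without breaking the walk. Below is a pared-down re-implementation of that idea for a circular list with a sentinel head; struct item, drop_odd_items() and the minimal helpers are illustrative stand-ins, not the kernel's list.h.

#include <stddef.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
	int value;
	struct list_head lru;
};

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Delete every odd-valued item; safe because 'next' is fetched ahead of time. */
static void drop_odd_items(struct list_head *list)
{
	struct item *page, *next;

	for (page = container_of(list->next, struct item, lru),
	     next = container_of(page->lru.next, struct item, lru);
	     &page->lru != list;
	     page = next, next = container_of(next->lru.next, struct item, lru)) {
		if (page->value & 1) {
			list_del(&page->lru);
			free(page);
		}
	}
}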
dmapool.c
    69   char *next;  (in show_pools(), local)
    73   next = buf;  (in show_pools())
    76   temp = scnprintf(next, size, "poolinfo - 0.1\n");  (in show_pools())
    78   next += temp;  (in show_pools())
    93   temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",  (in show_pools())
    98   next += temp;  (in show_pools())
    209  unsigned int next = offset + pool->size;  (in pool_initialise_page(), local)
    210  if (unlikely((next + pool->size) >= next_boundary)) {  (in pool_initialise_page())
    211  next = next_boundary;  (in pool_initialise_page())
    214  *(int *)(page->vaddr + offset) = next;  (in pool_initialise_page())
    [all …]
ptdump.c
    29   unsigned long next, struct mm_walk *walk)  (in ptdump_pgd_entry(), argument)
    50   unsigned long next, struct mm_walk *walk)  (in ptdump_p4d_entry(), argument)
    71   unsigned long next, struct mm_walk *walk)  (in ptdump_pud_entry(), argument)
    92   unsigned long next, struct mm_walk *walk)  (in ptdump_pmd_entry(), argument)
    111  unsigned long next, struct mm_walk *walk)  (in ptdump_pte_entry(), argument)
    124  static int ptdump_hole(unsigned long addr, unsigned long next,  (in ptdump_hole(), argument)
gup_benchmark.c
    71   unsigned long i, nr_pages, addr, next;  (in __gup_benchmark_ioctl(), local)
    94   for (addr = gup->addr; addr < gup->addr + gup->size; addr = next) {  (in __gup_benchmark_ioctl())
    98   next = addr + nr * PAGE_SIZE;  (in __gup_benchmark_ioctl())
    99   if (next > gup->addr + gup->size) {  (in __gup_benchmark_ioctl())
    100  next = gup->addr + gup->size;  (in __gup_benchmark_ioctl())
    101  nr = (next - addr) / PAGE_SIZE;  (in __gup_benchmark_ioctl())
util.c
    281  struct vm_area_struct *next;  (in __vma_link_list(), local)
    285  next = prev->vm_next;  (in __vma_link_list())
    288  next = mm->mmap;  (in __vma_link_list())
    291  vma->vm_next = next;  (in __vma_link_list())
    292  if (next)  (in __vma_link_list())
    293  next->vm_prev = vma;  (in __vma_link_list())
    298  struct vm_area_struct *prev, *next;  (in __vma_unlink_list(), local)
    300  next = vma->vm_next;  (in __vma_unlink_list())
    303  prev->vm_next = next;  (in __vma_unlink_list())
    305  mm->mmap = next;  (in __vma_unlink_list())
    [all …]
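__vma_link_list() threads a new VMA into the mm's linked list after prev, falling back to the list head when prev is NULL, and fixes up the neighbour's back pointer; __vma_unlink_list() does the reverse. A condensed sketch of both operations follows; struct vma_stub and struct mm_stub are simplified stand-ins for vm_area_struct and mm_struct.

struct vma_stub {
	struct vma_stub *vm_next, *vm_prev;
};

struct mm_stub {
	struct vma_stub *mmap;		/* first VMA in the list */
};

/* Insert 'vma' after 'prev' (or at the head when prev is NULL). */
static void vma_link_list(struct mm_stub *mm, struct vma_stub *vma,
			  struct vma_stub *prev)
{
	struct vma_stub *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Unlink 'vma', pointing its neighbours at each other. */
static void vma_unlink_list(struct mm_stub *mm, struct vma_stub *vma)
{
	struct vma_stub *prev = vma->vm_prev;
	struct vma_stub *next = vma->vm_next;

	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}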
swapfile.c
    625  cluster->next = cluster_next(&cluster->index) *  (in scan_swap_map_try_ssd_cluster())
    645  tmp = cluster->next;  (in scan_swap_map_try_ssd_cluster())
    661  cluster->next = tmp + 1;  (in scan_swap_map_try_ssd_cluster())
    759  static void set_cluster_next(struct swap_info_struct *si, unsigned long next)  (in set_cluster_next(), argument)
    764  si->cluster_next = next;  (in set_cluster_next())
    775  (next >> SWAP_ADDRESS_SPACE_SHIFT)) {  (in set_cluster_next())
    779  next = si->lowest_bit +  (in set_cluster_next())
    781  next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);  (in set_cluster_next())
    782  next = max_t(unsigned int, next, si->lowest_bit);  (in set_cluster_next())
    784  this_cpu_write(*si->cluster_next_cpu, next);  (in set_cluster_next())
    [all …]
gup.c
    2384  unsigned long next;  (in gup_huge_pd(), local)
    2388  next = hugepte_addr_end(addr, end, sz);  (in gup_huge_pd())
    2391  } while (ptep++, addr = next, addr != end);  (in gup_huge_pd())
    2504  unsigned long next;  (in gup_pmd_range(), local)
    2511  next = pmd_addr_end(addr, end);  (in gup_pmd_range())
    2525  if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,  (in gup_pmd_range())
    2535  PMD_SHIFT, next, flags, pages, nr))  (in gup_pmd_range())
    2537  } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))  (in gup_pmd_range())
    2539  } while (pmdp++, addr = next, addr != end);  (in gup_pmd_range())
    2547  unsigned long next;  (in gup_pud_range(), local)
    [all …]
kmemleak.c
    1311  void *next;  (in scan_large_block(), local)
    1314  next = min(start + MAX_SCAN_SIZE, end);  (in scan_large_block())
    1315  scan_block(start, next, NULL);  (in scan_large_block())
    1316  start = next;  (in scan_large_block())
    1345  void *next;  (in scan_object(), local)
    1348  next = min(start + MAX_SCAN_SIZE, end);  (in scan_object())
    1349  scan_block(start, next, object);  (in scan_object())
    1351  start = next;  (in scan_object())
    1381  object = list_entry(gray_list.next, typeof(*object), gray_list);  (in scan_gray_list())
    1389  tmp = list_entry(object->gray_list.next, typeof(*object),  (in scan_gray_list())
    [all …]
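scan_large_block() and scan_object() both walk a region in MAX_SCAN_SIZE pieces, with next clamping the final chunk to end so the scanner can yield or bail out between chunks. A small sketch of that chunking; the MAX_SCAN_SIZE value, the empty scan_block() body and the omitted rescheduling are placeholders, not kmemleak's actual definitions.

#define MAX_SCAN_SIZE	4096UL		/* value assumed for illustration */

static void scan_block(const char *start, const char *end)
{
	/* examine every word in [start, end) ... */
	(void)start;
	(void)end;
}

static void scan_large_block(const char *start, const char *end)
{
	const char *next;

	while (start < end) {
		next = start + MAX_SCAN_SIZE < end ? start + MAX_SCAN_SIZE : end;
		scan_block(start, next);
		start = next;
		/* the real code can reschedule or abort between chunks */
	}
}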
backing-dev.c
    496  blkcg_cgwb_list->next && memcg_cgwb_list->next) {  (in cgwb_create())
    654  struct bdi_writeback *wb, *next;  (in wb_memcg_offline(), local)
    657  list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)  (in wb_memcg_offline())
    659  memcg_cgwb_list->next = NULL; /* prevent new wb's */  (in wb_memcg_offline())
    671  struct bdi_writeback *wb, *next;  (in wb_blkcg_offline(), local)
    674  list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)  (in wb_blkcg_offline())
    676  blkcg->cgwb_list.next = NULL; /* prevent new wb's */  (in wb_blkcg_offline())
frontswap.c
    33   for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)
    140  ops->next = frontswap_ops;  (in frontswap_register_ops())
    141  } while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next);  (in frontswap_register_ops())
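frontswap_register_ops() pushes the new ops onto a singly linked list with a lock-free compare-and-swap, retrying if another registration raced in between reading the head and publishing the new node. A self-contained sketch of that push using C11 atomics in place of the kernel's cmpxchg(); struct ops_stub, ops_list and register_ops() are illustrative names, not the real frontswap API.

#include <stdatomic.h>

struct ops_stub {
	struct ops_stub *next;
};

static _Atomic(struct ops_stub *) ops_list;

static void register_ops(struct ops_stub *ops)
{
	struct ops_stub *head;

	do {
		head = atomic_load(&ops_list);
		ops->next = head;	/* link before publishing */
	} while (!atomic_compare_exchange_weak(&ops_list, &head, ops));
}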
madvise.c
    336  unsigned long next = pmd_addr_end(addr, end);  (in madvise_cold_or_pageout_pte_range(), local)
    362  if (next - addr != HPAGE_PMD_SIZE) {  (in madvise_cold_or_pageout_pte_range())
    612  unsigned long next;  (in madvise_free_pte_range(), local)
    614  next = pmd_addr_end(addr, end);  (in madvise_free_pte_range())
    616  if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))  (in madvise_free_pte_range())
    617  goto next;  (in madvise_free_pte_range())
    730  next:  (in madvise_free_pte_range())
mremap.c
    394  unsigned long next, extent, mask, size;  (in get_extent(), local)
    411  next = (old_addr + size) & mask;  (in get_extent())
    413  extent = next - old_addr;  (in get_extent())
    416  next = (new_addr + size) & mask;  (in get_extent())
    417  if (extent > next - new_addr)  (in get_extent())
    418  extent = next - new_addr;  (in get_extent())
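get_extent() limits how much is moved per step: next is the following table boundary, and the extent is clamped so a single step crosses neither the old nor the new mapping's boundary, nor the end of the remaining range. A condensed sketch with an assumed 2 MiB step; the real function derives mask/size from the page-table level being moved, so the constants here are assumptions.

#define STEP_SHIFT	21UL			/* assumed: PMD-sized steps */
#define STEP_SIZE	(1UL << STEP_SHIFT)
#define STEP_MASK	(~(STEP_SIZE - 1))

static unsigned long get_extent(unsigned long old_addr, unsigned long old_end,
				unsigned long new_addr)
{
	unsigned long next, extent;

	/* distance to the next boundary of the old address */
	next = (old_addr + STEP_SIZE) & STEP_MASK;
	extent = next - old_addr;

	/* do not run past the end of the range being moved */
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	/* nor past the next boundary of the new address */
	next = (new_addr + STEP_SIZE) & STEP_MASK;
	if (extent > next - new_addr)
		extent = next - new_addr;

	return extent;
}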
/mm/damon/
vaddr.c
    68   struct damon_region *n = NULL, *next;  (in damon_va_evenly_split_region(), local)
    82   next = damon_next_region(r);  (in damon_va_evenly_split_region())
    88   damon_insert_region(n, r, next, t);  (in damon_va_evenly_split_region())
    128  goto next;  (in __damon_va_three_regions())
    135  goto next;  (in __damon_va_three_regions())
    145  next:  (in __damon_va_three_regions())
    308  struct damon_region *r, *next;  (in damon_va_apply_three_regions(), local)
    312  damon_for_each_region_safe(r, next, t) {  (in damon_va_apply_three_regions())
    371  unsigned long next, struct mm_walk *walk)  (in damon_mkold_pmd_entry(), argument)
    503  unsigned long next, struct mm_walk *walk)  (in damon_young_pmd_entry(), argument)
core.c
    180  struct damon_region *r, *next;  (in damon_free_target(), local)
    182  damon_for_each_region_safe(r, next, t)  (in damon_free_target())
    263  struct damon_target *t, *next;  (in damon_set_targets(), local)
    271  damon_for_each_target_safe(t, next, ctx)  (in damon_set_targets())
    327  struct damos *s, *next;  (in damon_set_schemes(), local)
    330  damon_for_each_scheme_safe(s, next, ctx)  (in damon_set_schemes())
    759  struct damon_region *r, *prev = NULL, *next;  (in damon_merge_regions_of(), local)
    761  damon_for_each_region_safe(r, next, t) {  (in damon_merge_regions_of())
    824  struct damon_region *r, *next;  (in damon_split_regions_of(), local)
    828  damon_for_each_region_safe(r, next, t) {  (in damon_split_regions_of())
    [all …]
/mm/kfence/
core.c
    186  enum kfence_object_state next)  (in metadata_update_state(), argument)
    189  next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;  (in metadata_update_state())
    205  WRITE_ONCE(meta->state, next);  (in metadata_update_state())
    267  meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);  (in kfence_guarded_alloc())
    563  .next = next_object,