/mm/damon/ |
D | vaddr-test.h |
    19   int i, j;   in __link_vmas() local
    32   for (j = i; j < nr_vmas; j++) {   in __link_vmas()
    33   if (j == 0)   in __link_vmas()
    35   gap = vmas[j].vm_start - vmas[j - 1].vm_end;   in __link_vmas()
|
D | dbgfs.c |
    790   int i, j;   in dbgfs_rm_context() local
    824   for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) {   in dbgfs_rm_context()
    830   new_dirs[j] = dbgfs_dirs[i];   in dbgfs_rm_context()
    831   new_ctxs[j++] = dbgfs_ctxs[i];   in dbgfs_rm_context()
|
/mm/ |
D | truncate.c |
    63    int i, j;   in truncate_exceptional_pvec_entries() local
    70    for (j = 0; j < pagevec_count(pvec); j++)   in truncate_exceptional_pvec_entries()
    71    if (xa_is_value(pvec->pages[j]))   in truncate_exceptional_pvec_entries()
    74    if (j == pagevec_count(pvec))   in truncate_exceptional_pvec_entries()
    78    lock = !dax && indices[j] < end;   in truncate_exceptional_pvec_entries()
    82    for (i = j; i < pagevec_count(pvec); i++) {   in truncate_exceptional_pvec_entries()
    87    pvec->pages[j++] = page;   in truncate_exceptional_pvec_entries()
    104   pvec->nr = j;   in truncate_exceptional_pvec_entries()
|
D | shuffle.c |
    92    unsigned long j;   in __shuffle_zone() local
    113   j = z->zone_start_pfn +   in __shuffle_zone()
    116   page_j = shuffle_valid_page(z, j, order);   in __shuffle_zone()
    138   pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);   in __shuffle_zone()
|
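The shuffle.c hits above use j as a randomly picked page-frame number that is swapped with the page at i while walking the zone (the "swap: %#lx -> %#lx" debug line is that swap), essentially a Fisher-Yates-style randomization of free-page order. As a rough user-space sketch of that pick-a-random-partner-and-swap idiom, not the kernel code itself; the array and helper name below are made up for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Hypothetical stand-in for the "pick a random j and swap with i" idiom
     * in __shuffle_zone(); it shuffles a plain array instead of pages. */
    static void shuffle_array(unsigned long *v, size_t n)
    {
        for (size_t i = 0; i + 1 < n; i++) {
            /* pick j from [i, n); modulo bias is acceptable for a sketch */
            size_t j = i + (size_t)(rand() % (n - i));
            unsigned long tmp = v[i];

            v[i] = v[j];
            v[j] = tmp;
        }
    }

    int main(void)
    {
        unsigned long v[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        srand((unsigned int)time(NULL));
        shuffle_array(v, 8);
        for (int i = 0; i < 8; i++)
            printf("%lu ", v[i]);
        printf("\n");
        return 0;
    }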
D | page_alloc.c |
    1903   u64 j;   in deferred_init_mem_pfn_range_in_zone() local
    1910   for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {   in deferred_init_mem_pfn_range_in_zone()
    1915   *i = j;   in deferred_init_mem_pfn_range_in_zone()
    1939   u64 j = *i;   in deferred_init_maxorder() local
    1942   for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {   in deferred_init_maxorder()
    1958   swap(j, *i);   in deferred_init_maxorder()
    1960   for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {   in deferred_init_maxorder()
    6547   int i, j, zone_id, nid;   in memmap_init() local
    6552   for (j = 0; j < MAX_NR_ZONES; j++) {   in memmap_init()
    6553   struct zone *zone = node->node_zones + j;   in memmap_init()
    [all …]
|
D | swap_state.c |
    73   unsigned int i, j, nr;   in total_swapcache_pages() local
    90   for (j = 0; j < nr; j++)   in total_swapcache_pages()
    91   ret += spaces[j].nrpages;   in total_swapcache_pages()
|
D | swap.c |
    1248   int i, j;   in pagevec_remove_exceptionals() local
    1250   for (i = 0, j = 0; i < pagevec_count(pvec); i++) {   in pagevec_remove_exceptionals()
    1253   pvec->pages[j++] = page;   in pagevec_remove_exceptionals()
    1255   pvec->nr = j;   in pagevec_remove_exceptionals()
|
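Several of the hits above, truncate_exceptional_pvec_entries(), pagevec_remove_exceptionals(), dbgfs_rm_context() and __mem_cgroup_usage_unregister_event(), share one idiom: i scans every slot while j is the write cursor for the slots worth keeping, and the container is truncated to j at the end (pvec->nr = j). A minimal stand-alone C sketch of that two-index in-place compaction; the keep() predicate and the array are hypothetical, not kernel code.

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical predicate: keep only even values. */
    static int keep(int v)
    {
        return (v % 2) == 0;
    }

    /* In-place compaction: i scans, j is the write cursor; returns the new length. */
    static size_t compact(int *a, size_t n)
    {
        size_t i, j;

        for (i = 0, j = 0; i < n; i++) {
            if (keep(a[i]))
                a[j++] = a[i];
        }
        return j; /* callers truncate to j, as in pvec->nr = j */
    }

    int main(void)
    {
        int a[] = { 1, 2, 3, 4, 5, 6 };
        size_t n = compact(a, sizeof(a) / sizeof(a[0]));

        for (size_t i = 0; i < n; i++)
            printf("%d ", a[i]); /* prints: 2 4 6 */
        printf("\n");
        return 0;
    }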
D | percpu.c |
    2975   int unit, i, j, rc = 0;   in pcpu_page_first_chunk() local
    3003   j = 0;   in pcpu_page_first_chunk()
    3017   pages[j++] = virt_to_page(ptr);   in pcpu_page_first_chunk()
    3060   while (--j >= 0)   in pcpu_page_first_chunk()
    3061   free_fn(page_address(pages[j]), PAGE_SIZE);   in pcpu_page_first_chunk()
|
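The percpu.c hits show another recurring role of j in this listing: a running count of resources populated so far (pages[j++] = ...) that doubles as the bound of the unwind loop on the error path (while (--j >= 0) free_fn(...)). A hedged sketch of that allocate-then-roll-back pattern using plain malloc()/free(); the function name and sizes are illustrative, not the kernel API.

    #include <stdlib.h>

    /*
     * Allocate n buffers of sz bytes each; on any failure, free everything
     * that was already allocated, mirroring the "while (--j >= 0)" unwind
     * seen in pcpu_page_first_chunk().
     */
    static void **alloc_all_or_nothing(int n, size_t sz)
    {
        void **bufs = calloc((size_t)n, sizeof(*bufs));
        int j;

        if (!bufs)
            return NULL;

        for (j = 0; j < n; j++) {
            bufs[j] = malloc(sz);
            if (!bufs[j])
                goto fail;
        }
        return bufs;

    fail:
        while (--j >= 0)
            free(bufs[j]);
        free(bufs);
        return NULL;
    }

    int main(void)
    {
        void **bufs = alloc_all_or_nothing(4, 64);

        if (bufs) {
            for (int j = 0; j < 4; j++)
                free(bufs[j]);
            free(bufs);
        }
        return 0;
    }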
D | mmap.c |
    351   int i = 0, j, bug = 0;   in browse_rb() local
    386   j = 0;   in browse_rb()
    388   j++;   in browse_rb()
    389   if (i != j) {   in browse_rb()
    390   pr_emerg("backwards %d, forwards %d\n", j, i);   in browse_rb()
|
D | swapfile.c |
    3115   unsigned int j, k;   in setup_swap_map_and_extents() local
    3174   j = (k + col) % SWAP_CLUSTER_COLS;   in setup_swap_map_and_extents()
    3176   idx = i * SWAP_CLUSTER_COLS + j;   in setup_swap_map_and_extents()
|
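In the swapfile.c hit, j is a column index: the counter k is rotated by col and wrapped with % SWAP_CLUSTER_COLS, and idx = i * SWAP_CLUSTER_COLS + j maps the (row, column) pair back to a flat cluster index, which suggests the clusters are walked column by column starting from col. A tiny sketch of that rotated row/column walk; the grid dimensions and starting column below are made up for the example.

    #include <stdio.h>

    #define COLS 4 /* stand-in for SWAP_CLUSTER_COLS */
    #define ROWS 3 /* stand-in for the number of cluster rows */

    int main(void)
    {
        unsigned int col = 2; /* arbitrary starting column */

        /*
         * Visit every cell of a ROWS x COLS grid one column at a time,
         * starting from 'col' and wrapping around via the modulo, and
         * print the flat index each (row, column) pair maps back to.
         */
        for (unsigned int k = 0; k < COLS; k++) {
            unsigned int j = (k + col) % COLS;

            printf("column %u:", j);
            for (unsigned int i = 0; i < ROWS; i++)
                printf(" %u", i * COLS + j);
            printf("\n");
        }
        return 0;
    }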
D | memcontrol.c |
    4423   int i, j, size, entries;   in __mem_cgroup_usage_unregister_event() local
    4468   for (i = 0, j = 0; i < thresholds->primary->size; i++) {   in __mem_cgroup_usage_unregister_event()
    4472   new->entries[j] = thresholds->primary->entries[i];   in __mem_cgroup_usage_unregister_event()
    4473   if (new->entries[j].threshold <= usage) {   in __mem_cgroup_usage_unregister_event()
    4481   j++;   in __mem_cgroup_usage_unregister_event()
|