
Searched refs: i (Results 1 – 25 of 88), sorted by relevance


/mm/
vmstat.c
114 int i; in sum_vm_events() local
121 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) in sum_vm_events()
122 ret[i] += this->event[i]; in sum_vm_events()
148 int i; in vm_events_fold_cpu() local
150 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { in vm_events_fold_cpu()
151 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
152 fold_state->event[i] = 0; in vm_events_fold_cpu()
297 int i; in set_pgdat_percpu_threshold() local
299 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
300 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
[all …]
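
The vmstat.c hits above all use i the same way: as an index over NR_VM_EVENT_ITEMS while one CPU's event counters are folded into a running total and then cleared. A minimal stand-alone sketch of that folding pattern — the plain arrays and the names NR_EVENT_ITEMS and fold_events are illustrative stand-ins, not the kernel's per-CPU API:

#include <stddef.h>

#define NR_EVENT_ITEMS 8	/* stand-in for NR_VM_EVENT_ITEMS */

/* Add one CPU's counters into the totals, then clear them (cf. vm_events_fold_cpu()). */
static void fold_events(unsigned long totals[NR_EVENT_ITEMS],
			unsigned long percpu[NR_EVENT_ITEMS])
{
	size_t i;

	for (i = 0; i < NR_EVENT_ITEMS; i++) {
		totals[i] += percpu[i];
		percpu[i] = 0;
	}
}
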
early_ioremap.c
77 int i; in early_ioremap_setup() local
79 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
80 if (WARN_ON(prev_map[i])) in early_ioremap_setup()
83 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
84 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); in early_ioremap_setup()
90 int i; in check_early_ioremap_leak() local
92 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in check_early_ioremap_leak()
93 if (prev_map[i]) in check_early_ioremap_leak()
112 int i, slot; in __early_ioremap() local
117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { in __early_ioremap()
[all …]
percpu-vm.c
57 int i; in pcpu_free_pages() local
60 for (i = page_start; i < page_end; i++) { in pcpu_free_pages()
61 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 int i; in pcpu_alloc_pages() local
91 for (i = page_start; i < page_end; i++) { in pcpu_alloc_pages()
92 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
102 while (--i >= page_start) in pcpu_alloc_pages()
103 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
108 for (i = page_start; i < page_end; i++) in pcpu_alloc_pages()
109 __free_page(pages[pcpu_page_idx(tcpu, i)]); in pcpu_alloc_pages()
[all …]
list_lru.c
325 int i; in __memcg_destroy_list_lru_node() local
327 for (i = begin; i < end; i++) in __memcg_destroy_list_lru_node()
328 kfree(memcg_lrus->lru[i]); in __memcg_destroy_list_lru_node()
334 int i; in __memcg_init_list_lru_node() local
336 for (i = begin; i < end; i++) { in __memcg_init_list_lru_node()
344 memcg_lrus->lru[i] = l; in __memcg_init_list_lru_node()
348 __memcg_destroy_list_lru_node(memcg_lrus, begin, i); in __memcg_init_list_lru_node()
440 int i; in memcg_init_list_lru() local
447 for_each_node(i) { in memcg_init_list_lru()
448 if (memcg_init_list_lru_node(&lru->node[i])) in memcg_init_list_lru()
[all …]
slab_common.c
110 size_t i; in __kmem_cache_free_bulk() local
112 for (i = 0; i < nr; i++) { in __kmem_cache_free_bulk()
114 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
116 kfree(p[i]); in __kmem_cache_free_bulk()
123 size_t i; in __kmem_cache_alloc_bulk() local
125 for (i = 0; i < nr; i++) { in __kmem_cache_alloc_bulk()
126 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
128 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
132 return i; in __kmem_cache_alloc_bulk()
737 unsigned int i; in setup_kmalloc_cache_index_table() local
[all …]
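
The slab_common.c fragments show the common bulk-allocation idiom: allocate objects one by one, and on the first failure free everything allocated so far and report how many succeeded. A rough userspace sketch of that rollback pattern, with malloc/free standing in for kmem_cache_alloc/kmem_cache_free and the names alloc_bulk/free_bulk chosen purely for illustration:

#include <stdlib.h>
#include <stddef.h>

/* Free the first nr objects of p[] (cf. __kmem_cache_free_bulk()). */
static void free_bulk(size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		free(p[i]);
}

/* Allocate nr objects of the given size into p[]; on failure undo and return 0. */
static size_t alloc_bulk(size_t size, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = malloc(size);

		if (!x) {
			free_bulk(i, p);	/* roll back the partial allocation */
			return 0;
		}
	}
	return i;
}
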
gup_benchmark.c
27 unsigned long i; in put_back_pages() local
32 for (i = 0; i < nr_pages; i++) in put_back_pages()
33 put_page(pages[i]); in put_back_pages()
47 unsigned long i; in verify_dma_pinned() local
54 for (i = 0; i < nr_pages; i++) { in verify_dma_pinned()
55 page = pages[i]; in verify_dma_pinned()
57 "pages[%lu] is NOT dma-pinned\n", i)) { in verify_dma_pinned()
71 unsigned long i, nr_pages, addr, next; in __gup_benchmark_ioctl() local
91 i = 0; in __gup_benchmark_ioctl()
110 pages + i); in __gup_benchmark_ioctl()
[all …]
truncate.c
63 int i, j; in truncate_exceptional_pvec_entries() local
82 for (i = j; i < pagevec_count(pvec); i++) { in truncate_exceptional_pvec_entries()
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries()
84 pgoff_t index = indices[i]; in truncate_exceptional_pvec_entries()
298 int i; in truncate_inode_pages_range() local
337 for (i = 0; i < pagevec_count(&pvec); i++) { in truncate_inode_pages_range()
338 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
341 index = indices[i]; in truncate_inode_pages_range()
361 for (i = 0; i < pagevec_count(&locked_pvec); i++) in truncate_inode_pages_range()
362 truncate_cleanup_page(locked_pvec.pages[i]); in truncate_inode_pages_range()
[all …]
frame_vector.c
138 int i; in frame_vector_to_pages() local
145 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
146 if (!pfn_valid(nums[i])) in frame_vector_to_pages()
149 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
150 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
164 int i; in frame_vector_to_pfns() local
172 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pfns()
173 nums[i] = page_to_pfn(pages[i]); in frame_vector_to_pfns()
migrate.c
445 int i; in migrate_page_move_mapping() local
447 for (i = 1; i < nr; i++) { in migrate_page_move_mapping()
544 int i; in __copy_gigantic_page() local
548 for (i = 0; i < nr_pages; ) { in __copy_gigantic_page()
552 i++; in __copy_gigantic_page()
553 dst = mem_map_next(dst, dst_base, i); in __copy_gigantic_page()
554 src = mem_map_next(src, src_base, i); in __copy_gigantic_page()
560 int i; in copy_huge_page() local
578 for (i = 0; i < nr_pages; i++) { in copy_huge_page()
580 copy_highpage(dst + i, src + i); in copy_huge_page()
[all …]
memblock.c
143 #define for_each_memblock_type(i, memblock_type, rgn) \ argument
144 for (i = 0, rgn = &memblock_type->regions[0]; \
145 i < memblock_type->cnt; \
146 i++, rgn = &memblock_type->regions[i])
184 unsigned long i; in memblock_overlaps_region() local
188 for (i = 0; i < type->cnt; i++) in memblock_overlaps_region()
189 if (memblock_addrs_overlap(base, size, type->regions[i].base, in memblock_overlaps_region()
190 type->regions[i].size)) in memblock_overlaps_region()
192 return i < type->cnt; in memblock_overlaps_region()
216 u64 i; in __memblock_find_range_bottom_up() local
[all …]
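
In memblock_overlaps_region() the index does double duty: the return value i < type->cnt can only mean "overlap found" if the loop breaks early on a match, which presumably happens in the elided lines. A small sketch of that idiom under that assumption, with a simplified region type; struct range_desc, addrs_overlap, and overlaps_any are illustrative names:

#include <stdbool.h>
#include <stddef.h>

struct range_desc {
	unsigned long base;
	unsigned long size;
};

/* Do [base, base+size) and [r->base, r->base+r->size) intersect? */
static bool addrs_overlap(unsigned long base, unsigned long size,
			  const struct range_desc *r)
{
	return base < r->base + r->size && r->base < base + size;
}

/* Return true if [base, base+size) overlaps any of the cnt regions. */
static bool overlaps_any(unsigned long base, unsigned long size,
			 const struct range_desc *regions, size_t cnt)
{
	size_t i;

	for (i = 0; i < cnt; i++)
		if (addrs_overlap(base, size, &regions[i]))
			break;		/* leave i < cnt to signal a hit */
	return i < cnt;
}
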
hmm.c
43 unsigned long i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_fill() local
45 for (; addr < end; addr += PAGE_SIZE, i++) in hmm_pfns_fill()
46 range->hmm_pfns[i] = cpu_flags; in hmm_pfns_fill()
125 unsigned long i; in hmm_range_need_fault() local
136 for (i = 0; i < npages; ++i) { in hmm_range_need_fault()
137 required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i], in hmm_range_need_fault()
151 unsigned long i, npages; in hmm_vma_walk_hole() local
154 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole()
156 hmm_pfns = &range->hmm_pfns[i]; in hmm_vma_walk_hole()
191 unsigned long pfn, npages, i; in hmm_vma_handle_pmd() local
[all …]
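
The hmm.c hits derive i from an address rather than counting from zero: the byte offset into the range is shifted right by PAGE_SHIFT to index the per-page pfn array. With 4 KiB pages (PAGE_SHIFT = 12), an address 0x5000 bytes past range->start lands at index 5. A tiny sketch of that conversion; addr_to_index is an illustrative helper, not a kernel function:

#define PAGE_SHIFT 12	/* 4 KiB pages, as on most architectures */

/* Index of the page containing addr within a range starting at start. */
static unsigned long addr_to_index(unsigned long addr, unsigned long start)
{
	return (addr - start) >> PAGE_SHIFT;
}
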
memremap.c
85 int i; in pgmap_pfn_valid() local
87 for (i = 0; i < pgmap->nr_range; i++) { in pgmap_pfn_valid()
88 struct range *range = &pgmap->ranges[i]; in pgmap_pfn_valid()
92 return pfn >= pfn_first(pgmap, i); in pgmap_pfn_valid()
112 #define for_each_device_pfn(pfn, map, i) \ argument
113 for (pfn = pfn_first(map, i); pfn < pfn_end(map, i); pfn = pfn_next(pfn))
171 int i; in memunmap_pages() local
174 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
175 for_each_device_pfn(pfn, pgmap, i) in memunmap_pages()
179 for (i = 0; i < pgmap->nr_range; i++) in memunmap_pages()
[all …]
highmem.c
155 int i = PKMAP_NR(addr); in kmap_to_page() local
156 return pte_page(pkmap_page_table[i]); in kmap_to_page()
165 int i; in flush_all_zero_pkmaps() local
170 for (i = 0; i < LAST_PKMAP; i++) { in flush_all_zero_pkmaps()
179 if (pkmap_count[i] != 1) in flush_all_zero_pkmaps()
181 pkmap_count[i] = 0; in flush_all_zero_pkmaps()
184 BUG_ON(pte_none(pkmap_page_table[i])); in flush_all_zero_pkmaps()
193 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
194 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); in flush_all_zero_pkmaps()
476 int i; in page_address_init() local
[all …]
zsmalloc.c
588 int i; in zs_stats_size_show() local
603 for (i = 0; i < ZS_SIZE_CLASSES; i++) { in zs_stats_size_show()
604 class = pool->size_class[i]; in zs_stats_size_show()
606 if (class->index != i) in zs_stats_size_show()
623 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
799 int i, max_usedpc = 0; in get_pages_per_zspage() local
803 for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { in get_pages_per_zspage()
807 zspage_size = i * PAGE_SIZE; in get_pages_per_zspage()
813 max_usedpc_order = i; in get_pages_per_zspage()
1026 int i; in create_page_chain() local
[all …]
slab.c
286 #define STATS_SET_FREEABLE(x, i) \ argument
288 if ((x)->max_freeable < i) \
289 (x)->max_freeable = i; \
306 #define STATS_SET_FREEABLE(x, i) do { } while (0) argument
662 int i; in alloc_alien_cache() local
670 for_each_node(i) { in alloc_alien_cache()
671 if (i == node || !node_online(i)) in alloc_alien_cache()
673 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); in alloc_alien_cache()
674 if (!alc_ptr[i]) { in alloc_alien_cache()
675 for (i--; i >= 0; i--) in alloc_alien_cache()
[all …]
swap_state.c
73 unsigned int i, j, nr; in total_swapcache_pages() local
78 for (i = 0; i < MAX_SWAPFILES; i++) { in total_swapcache_pages()
79 swp_entry_t entry = swp_entry(i, 1); in total_swapcache_pages()
88 nr = nr_swapper_spaces[i]; in total_swapcache_pages()
89 spaces = swapper_spaces[i]; in total_swapcache_pages()
135 unsigned long i, nr = thp_nr_pages(page); in add_to_swap_cache() local
152 for (i = 0; i < nr; i++) { in add_to_swap_cache()
153 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
160 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
188 int i, nr = thp_nr_pages(page); in __delete_from_swap_cache() local
[all …]
slab.h
367 size_t i; in memcg_slab_post_alloc_hook() local
372 for (i = 0; i < size; i++) { in memcg_slab_post_alloc_hook()
373 if (likely(p[i])) { in memcg_slab_post_alloc_hook()
374 page = virt_to_head_page(p[i]); in memcg_slab_post_alloc_hook()
382 off = obj_to_index(s, page, p[i]); in memcg_slab_post_alloc_hook()
401 int i; in memcg_slab_free_hook() local
406 for (i = 0; i < objects; i++) { in memcg_slab_free_hook()
407 if (unlikely(!p[i])) in memcg_slab_free_hook()
410 page = virt_to_head_page(p[i]); in memcg_slab_free_hook()
419 off = obj_to_index(s, page, p[i]); in memcg_slab_free_hook()
[all …]
memcontrol.c
1573 int i; in memory_stats_init() local
1575 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { in memory_stats_init()
1577 if (memory_stats[i].idx == NR_ANON_THPS) in memory_stats_init()
1578 memory_stats[i].ratio = HPAGE_PMD_SIZE; in memory_stats_init()
1580 VM_BUG_ON(!memory_stats[i].ratio); in memory_stats_init()
1581 VM_BUG_ON(memory_stats[i].idx >= MEMCG_NR_STAT); in memory_stats_init()
1591 int i; in memory_stat_format() local
1608 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) { in memory_stat_format()
1611 size = memcg_page_state(memcg, memory_stats[i].idx); in memory_stat_format()
1612 size *= memory_stats[i].ratio; in memory_stat_format()
[all …]
page_poison.c
35 int i; in __kernel_poison_pages() local
37 for (i = 0; i < n; i++) in __kernel_poison_pages()
38 poison_page(page + i); in __kernel_poison_pages()
93 int i; in __kernel_unpoison_pages() local
95 for (i = 0; i < n; i++) in __kernel_unpoison_pages()
96 unpoison_page(page + i); in __kernel_unpoison_pages()
/mm/damon/
vaddr-test.h
19 int i, j; in __link_vmas() local
25 for (i = 0; i < nr_vmas - 1; i++) { in __link_vmas()
26 vmas[i].vm_next = &vmas[i + 1]; in __link_vmas()
28 vmas[i].vm_rb.rb_left = NULL; in __link_vmas()
29 vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb; in __link_vmas()
32 for (j = i; j < nr_vmas; j++) { in __link_vmas()
39 vmas[i].rb_subtree_gap = largest_gap; in __link_vmas()
41 vmas[i].vm_next = NULL; in __link_vmas()
42 vmas[i].vm_rb.rb_right = NULL; in __link_vmas()
43 vmas[i].rb_subtree_gap = 0; in __link_vmas()
[all …]
dbgfs-test.h
20 ssize_t nr_integers = 0, i; in damon_dbgfs_test_str_to_target_ids() local
46 for (i = 0; i < nr_integers; i++) in damon_dbgfs_test_str_to_target_ids()
47 KUNIT_EXPECT_EQ(test, expected[i], answers[i]); in damon_dbgfs_test_str_to_target_ids()
54 for (i = 0; i < nr_integers; i++) in damon_dbgfs_test_str_to_target_ids()
55 KUNIT_EXPECT_EQ(test, expected[i], answers[i]); in damon_dbgfs_test_str_to_target_ids()
62 for (i = 0; i < 2; i++) in damon_dbgfs_test_str_to_target_ids()
63 KUNIT_EXPECT_EQ(test, expected[i], answers[i]); in damon_dbgfs_test_str_to_target_ids()
130 int i, rc; in damon_dbgfs_test_set_init_regions() local
136 for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) { in damon_dbgfs_test_set_init_regions()
137 input = valid_inputs[i]; in damon_dbgfs_test_set_init_regions()
[all …]
core-test.h
144 int i; in damon_test_merge_two() local
159 i = 0; in damon_test_merge_two()
162 i++; in damon_test_merge_two()
164 KUNIT_EXPECT_EQ(test, i, 1); in damon_test_merge_two()
172 unsigned int i = 0; in __nth_region_of() local
175 if (i++ == idx) in __nth_region_of()
192 int i; in damon_test_merge_regions_of() local
195 for (i = 0; i < ARRAY_SIZE(sa); i++) { in damon_test_merge_regions_of()
196 r = damon_new_region(sa[i], ea[i]); in damon_test_merge_regions_of()
197 r->nr_accesses = nrs[i]; in damon_test_merge_regions_of()
[all …]
dbgfs.c
156 ssize_t i; in free_schemes_arr() local
158 for (i = 0; i < nr_schemes; i++) in free_schemes_arr()
159 kfree(schemes[i]); in free_schemes_arr()
355 int i; in dbgfs_put_pids() local
357 for (i = 0; i < nr_ids; i++) in dbgfs_put_pids()
358 put_pid((struct pid *)ids[i]); in dbgfs_put_pids()
371 int i; in dbgfs_target_ids_write() local
390 for (i = 0; i < nr_targets; i++) { in dbgfs_target_ids_write()
391 targets[i] = (unsigned long)find_get_pid( in dbgfs_target_ids_write()
392 (int)targets[i]); in dbgfs_target_ids_write()
[all …]
/mm/kfence/
kfence_test.c
544 int i; in test_init_on_free() local
552 for (i = 0; i < size; i++) in test_init_on_free()
553 expect.addr[i] = i + 1; in test_init_on_free()
556 for (i = 0; i < size; i++) { in test_init_on_free()
562 KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0); in test_init_on_free()
564 if (!i) /* Only check first access to not fail test if page is ever re-protected. */ in test_init_on_free()
574 int i; in test_memcache_ctor() local
579 for (i = 0; i < 8; i++) in test_memcache_ctor()
580 KUNIT_EXPECT_EQ(test, buf[i], (char)'x'); in test_memcache_ctor()
592 int i; in test_gfpzero() local
[all …]
core.c
428 int i; in kfence_init_pool() local
446 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { in kfence_init_pool()
447 if (!i || (i % 2)) in kfence_init_pool()
451 if (WARN_ON(compound_head(&pages[i]) != &pages[i])) in kfence_init_pool()
454 __SetPageSlab(&pages[i]); in kfence_init_pool()
463 for (i = 0; i < 2; i++) { in kfence_init_pool()
470 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { in kfence_init_pool()
471 struct kfence_metadata *meta = &kfence_metadata[i]; in kfence_init_pool()
514 int i; in stats_show() local
517 for (i = 0; i < KFENCE_COUNTER_COUNT; i++) in stats_show()
[all …]
