Searched refs: i (Results 1 – 25 of 73) sorted by relevance

/mm/
vmstat.c
114 int i; in sum_vm_events() local
121 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) in sum_vm_events()
122 ret[i] += this->event[i]; in sum_vm_events()
148 int i; in vm_events_fold_cpu() local
150 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { in vm_events_fold_cpu()
151 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
152 fold_state->event[i] = 0; in vm_events_fold_cpu()
297 int i; in set_pgdat_percpu_threshold() local
299 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
300 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
[all …]
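
The vmstat.c hits above all walk one pattern: each CPU keeps a private array of event counters, and sum_vm_events()/vm_events_fold_cpu() accumulate them into a single result array. A minimal userspace sketch of that fold, with NR_CPUS, NR_EVENTS, and the array layout invented here for illustration:

#include <stdio.h>

#define NR_CPUS		4
#define NR_EVENTS	3	/* stands in for NR_VM_EVENT_ITEMS */

static unsigned long event[NR_CPUS][NR_EVENTS];	/* per-CPU counters */

/* Accumulate every CPU's counters into ret[], as sum_vm_events() does. */
static void sum_events(unsigned long *ret)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		for (int i = 0; i < NR_EVENTS; i++)
			ret[i] += event[cpu][i];
}

int main(void)
{
	unsigned long ret[NR_EVENTS] = { 0 };

	event[0][1] = 5;
	event[2][1] = 7;
	sum_events(ret);
	printf("event 1 total: %lu\n", ret[1]);	/* 12 */
	return 0;
}
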
hmm.c
262 unsigned long i; in hmm_pfns_bad() local
264 i = (addr - range->start) >> PAGE_SHIFT; in hmm_pfns_bad()
265 for (; addr < end; addr += PAGE_SIZE, i++) in hmm_pfns_bad()
266 pfns[i] = range->values[HMM_PFN_ERROR]; in hmm_pfns_bad()
290 unsigned long i; in hmm_vma_walk_hole_() local
293 i = (addr - range->start) >> PAGE_SHIFT; in hmm_vma_walk_hole_()
298 for (; addr < end; addr += PAGE_SIZE, i++) { in hmm_vma_walk_hole_()
299 pfns[i] = range->values[HMM_PFN_NONE]; in hmm_vma_walk_hole_()
304 &pfns[i]); in hmm_vma_walk_hole_()
362 unsigned long i; in hmm_range_need_fault() local
[all …]
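
hmm_pfns_bad() and hmm_vma_walk_hole_() both derive the pfns[] index from an address offset, i = (addr - range->start) >> PAGE_SHIFT, then advance addr by PAGE_SIZE and i by one in lockstep. A standalone sketch of that indexing (fill_range() is a made-up name; PAGE_SHIFT is fixed at 12 for 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Fill one slot per page in [addr, end), mirroring hmm_pfns_bad(). */
static void fill_range(unsigned long start, unsigned long addr,
		       unsigned long end, unsigned long *pfns,
		       unsigned long value)
{
	unsigned long i = (addr - start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = value;
}

int main(void)
{
	unsigned long pfns[8] = { 0 };
	unsigned long start = 0x10000;

	/* Mark pages 2..4 of the range. */
	fill_range(start, start + 2 * PAGE_SIZE, start + 5 * PAGE_SIZE,
		   pfns, ~0UL);
	for (int i = 0; i < 8; i++)
		printf("pfns[%d] = %lx\n", i, pfns[i]);
	return 0;
}
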
percpu-vm.c
57 int i; in pcpu_free_pages() local
60 for (i = page_start; i < page_end; i++) { in pcpu_free_pages()
61 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
86 int i; in pcpu_alloc_pages() local
91 for (i = page_start; i < page_end; i++) { in pcpu_alloc_pages()
92 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
102 while (--i >= page_start) in pcpu_alloc_pages()
103 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
108 for (i = page_start; i < page_end; i++) in pcpu_alloc_pages()
109 __free_page(pages[pcpu_page_idx(tcpu, i)]); in pcpu_alloc_pages()
[all …]
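
pcpu_alloc_pages() shows the kernel's usual unwind idiom: walk i forward allocating, and on failure walk the same index backward, while (--i >= page_start), freeing exactly what was allocated. A sketch of that rollback with plain malloc()/free() (alloc_all() is a hypothetical helper):

#include <stdlib.h>

/* Allocate n buffers; on failure, free the i already allocated and
 * return -1, mirroring the unwind loop in pcpu_alloc_pages(). */
static int alloc_all(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (--i >= 0)
				free(bufs[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *bufs[16];

	if (alloc_all(bufs, 16, 4096) == 0)
		for (int i = 0; i < 16; i++)
			free(bufs[i]);
	return 0;
}
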
early_ioremap.c
77 int i; in early_ioremap_setup() local
79 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
80 if (WARN_ON(prev_map[i])) in early_ioremap_setup()
83 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
84 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); in early_ioremap_setup()
90 int i; in check_early_ioremap_leak() local
92 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in check_early_ioremap_leak()
93 if (prev_map[i]) in check_early_ioremap_leak()
112 int i, slot; in __early_ioremap() local
117 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { in __early_ioremap()
[all …]
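
early_ioremap_setup() precomputes one virtual base per boot-time mapping slot; __fix_to_virt() converts a fixmap index into an address counting down from the top of the fixmap region, so slot i sits NR_FIX_BTMAPS pages below slot i-1. A sketch with illustrative constants (FIXADDR_TOP and the slot counts here are not real architecture values):

#include <stdio.h>

#define PAGE_SHIFT	 12
#define FIXADDR_TOP	 0xfffff000UL	/* illustrative, not a real arch value */
#define NR_FIX_BTMAPS	 64
#define FIX_BTMAPS_SLOTS 4
#define FIX_BTMAP_BEGIN	 (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS - 1)

/* Fixmap indices count down from FIXADDR_TOP, one page per index. */
#define __fix_to_virt(x) (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

int main(void)
{
	unsigned long slot_virt[FIX_BTMAPS_SLOTS];

	for (int i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * i);
		printf("slot %d base: %#lx\n", i, slot_virt[i]);
	}
	return 0;
}
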
list_lru.c
335 int i; in __memcg_destroy_list_lru_node() local
337 for (i = begin; i < end; i++) in __memcg_destroy_list_lru_node()
338 kfree(memcg_lrus->lru[i]); in __memcg_destroy_list_lru_node()
344 int i; in __memcg_init_list_lru_node() local
346 for (i = begin; i < end; i++) { in __memcg_init_list_lru_node()
354 memcg_lrus->lru[i] = l; in __memcg_init_list_lru_node()
358 __memcg_destroy_list_lru_node(memcg_lrus, begin, i); in __memcg_init_list_lru_node()
450 int i; in memcg_init_list_lru() local
457 for_each_node(i) { in memcg_init_list_lru()
458 if (memcg_init_list_lru_node(&lru->node[i])) in memcg_init_list_lru()
[all …]
frame_vector.c
125 int i; in put_vaddr_frames() local
138 for (i = 0; i < vec->nr_frames; i++) in put_vaddr_frames()
139 put_page(pages[i]); in put_vaddr_frames()
156 int i; in frame_vector_to_pages() local
163 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
164 if (!pfn_valid(nums[i])) in frame_vector_to_pages()
167 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
168 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
182 int i; in frame_vector_to_pfns() local
190 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pfns()
[all …]
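
frame_vector_to_pages() makes two passes over the same index: first validate every pfn and bail if any is bad, and only then convert in place, so the array is never left half-converted. The same shape in userspace, with value_valid() and the pointer conversion standing in for pfn_valid() and pfn_to_page():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for pfn_valid(). */
static bool value_valid(unsigned long v)
{
	return v < 1024;
}

/* Validate everything first, then convert: on -1, out[] is untouched. */
static int convert_all(const unsigned long *nums, void **out, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!value_valid(nums[i]))
			return -1;
	for (i = 0; i < n; i++)	/* stand-in for pfn_to_page() */
		out[i] = (void *)(uintptr_t)(nums[i] << 12);
	return 0;
}

int main(void)
{
	unsigned long nums[3] = { 1, 2, 3 };
	void *out[3];

	printf("%d\n", convert_all(nums, out, 3));	/* 0: all valid */
	return 0;
}
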
truncate.c
63 int i, j; in truncate_exceptional_pvec_entries() local
82 for (i = j; i < pagevec_count(pvec); i++) { in truncate_exceptional_pvec_entries()
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries()
84 pgoff_t index = indices[i]; in truncate_exceptional_pvec_entries()
301 int i; in truncate_inode_pages_range() local
340 for (i = 0; i < pagevec_count(&pvec); i++) { in truncate_inode_pages_range()
341 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
344 index = indices[i]; in truncate_inode_pages_range()
364 for (i = 0; i < pagevec_count(&locked_pvec); i++) in truncate_inode_pages_range()
365 truncate_cleanup_page(mapping, locked_pvec.pages[i]); in truncate_inode_pages_range()
[all …]
migrate.c
459 int i; in migrate_page_move_mapping() local
461 for (i = 1; i < HPAGE_PMD_NR; i++) { in migrate_page_move_mapping()
551 int i; in __copy_gigantic_page() local
555 for (i = 0; i < nr_pages; ) { in __copy_gigantic_page()
559 i++; in __copy_gigantic_page()
560 dst = mem_map_next(dst, dst_base, i); in __copy_gigantic_page()
561 src = mem_map_next(src, src_base, i); in __copy_gigantic_page()
567 int i; in copy_huge_page() local
585 for (i = 0; i < nr_pages; i++) { in copy_huge_page()
587 copy_highpage(dst + i, src + i); in copy_huge_page()
[all …]
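
copy_huge_page() copies a huge page as nr_pages separate subpage copies rather than one large copy, which lets the kernel reschedule between subpages. A userspace sketch of that chunked copy, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Copy nr_pages page-sized chunks, yielding between chunks the way
 * copy_huge_page() can cond_resched() between subpage copies. */
static void copy_chunked(char *dst, const char *src, unsigned long nr_pages)
{
	for (unsigned long i = 0; i < nr_pages; i++) {
		memcpy(dst + i * PAGE_SIZE, src + i * PAGE_SIZE, PAGE_SIZE);
		sched_yield();	/* stand-in for cond_resched() */
	}
}

int main(void)
{
	static char src[4 * PAGE_SIZE], dst[4 * PAGE_SIZE];

	copy_chunked(dst, src, 4);
	return 0;
}
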
swap_state.c
73 unsigned int i, j, nr; in total_swapcache_pages() local
78 for (i = 0; i < MAX_SWAPFILES; i++) { in total_swapcache_pages()
79 swp_entry_t entry = swp_entry(i, 1); in total_swapcache_pages()
88 nr = nr_swapper_spaces[i]; in total_swapcache_pages()
89 spaces = swapper_spaces[i]; in total_swapcache_pages()
119 unsigned long i, nr = compound_nr(page); in add_to_swap_cache() local
133 for (i = 0; i < nr; i++) { in add_to_swap_cache()
134 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
135 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
161 int i, nr = hpage_nr_pages(page); in __delete_from_swap_cache() local
[all …]
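
total_swapcache_pages() probes each swap type by building swp_entry(i, 1); a swap entry is just a type and an offset packed into one word. A sketch of that packing (the shift and mask here are illustrative; the kernel derives the real split from BITS_PER_LONG and the architecture):

#include <stdio.h>

#define SWP_TYPE_SHIFT	24	/* illustrative split */
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)

typedef struct { unsigned long val; } swp_entry_t;

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
	swp_entry_t e = { (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK) };
	return e;
}

static unsigned long swp_type(swp_entry_t e)	{ return e.val >> SWP_TYPE_SHIFT; }
static unsigned long swp_offset(swp_entry_t e)	{ return e.val & SWP_OFFSET_MASK; }

int main(void)
{
	swp_entry_t e = swp_entry(3, 1);	/* like swp_entry(i, 1) above */

	printf("type %lu offset %lu\n", swp_type(e), swp_offset(e));
	return 0;
}
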
slab_common.c
106 size_t i; in __kmem_cache_free_bulk() local
108 for (i = 0; i < nr; i++) { in __kmem_cache_free_bulk()
110 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
112 kfree(p[i]); in __kmem_cache_free_bulk()
119 size_t i; in __kmem_cache_alloc_bulk() local
121 for (i = 0; i < nr; i++) { in __kmem_cache_alloc_bulk()
122 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
124 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
128 return i; in __kmem_cache_alloc_bulk()
839 int i; in shutdown_memcg_caches() local
[all …]
slab.c
285 #define STATS_SET_FREEABLE(x, i) \ argument
287 if ((x)->max_freeable < i) \
288 (x)->max_freeable = i; \
305 #define STATS_SET_FREEABLE(x, i) do { } while (0) argument
651 int i; in alloc_alien_cache() local
659 for_each_node(i) { in alloc_alien_cache()
660 if (i == node || !node_online(i)) in alloc_alien_cache()
662 alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp); in alloc_alien_cache()
663 if (!alc_ptr[i]) { in alloc_alien_cache()
664 for (i--; i >= 0; i--) in alloc_alien_cache()
[all …]
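
The STATS_SET_FREEABLE hit shows both halves of a classic macro idiom: the stats-enabled variant wraps a multi-statement body in do { ... } while (0), and the disabled variant is an empty do { } while (0), so either form expands safely as a single statement after an if. A sketch with a hypothetical STATS_SET_MAX():

#include <stdio.h>

#define STATS 1

#if STATS
/* Track a running maximum; do/while(0) makes this one statement. */
#define STATS_SET_MAX(s, v) do {		\
		if ((s)->max < (v))		\
			(s)->max = (v);		\
	} while (0)
#else
#define STATS_SET_MAX(s, v) do { } while (0)
#endif

struct stats { int max; };

int main(void)
{
	struct stats s = { 0 };

	/* Safe even as the sole body of an if, with or without STATS. */
	if (1)
		STATS_SET_MAX(&s, 42);
	printf("max = %d\n", s.max);
	return 0;
}
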
memblock.c
165 unsigned long i; in memblock_overlaps_region() local
167 for (i = 0; i < type->cnt; i++) in memblock_overlaps_region()
168 if (memblock_addrs_overlap(base, size, type->regions[i].base, in memblock_overlaps_region()
169 type->regions[i].size)) in memblock_overlaps_region()
171 return i < type->cnt; in memblock_overlaps_region()
195 u64 i; in __memblock_find_range_bottom_up() local
197 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { in __memblock_find_range_bottom_up()
230 u64 i; in __memblock_find_range_top_down() local
232 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, in __memblock_find_range_top_down()
513 int i = 0; in memblock_merge_regions() local
[all …]
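
memblock_overlaps_region() uses the loop index itself as the found flag: the (elided) break leaves i pointing at the overlapping region, while a full scan leaves i == type->cnt, so return i < type->cnt reports the result. A sketch of that found-via-index convention (overlaps() and struct region are invented here):

#include <stdbool.h>
#include <stdio.h>

struct region { unsigned long base, size; };

static bool addrs_overlap(unsigned long b1, unsigned long s1,
			  unsigned long b2, unsigned long s2)
{
	return b1 < b2 + s2 && b2 < b1 + s1;
}

/* True if [base, base+size) overlaps any region; i doubles as the flag. */
static bool overlaps(unsigned long base, unsigned long size,
		     const struct region *r, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		if (addrs_overlap(base, size, r[i].base, r[i].size))
			break;
	return i < cnt;
}

int main(void)
{
	struct region r[] = { { 0x1000, 0x1000 }, { 0x4000, 0x2000 } };

	printf("%d\n", overlaps(0x4800, 0x100, r, 2));	/* 1 */
	return 0;
}
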
highmem.c
155 int i = PKMAP_NR(addr); in kmap_to_page() local
156 return pte_page(pkmap_page_table[i]); in kmap_to_page()
165 int i; in flush_all_zero_pkmaps() local
170 for (i = 0; i < LAST_PKMAP; i++) { in flush_all_zero_pkmaps()
179 if (pkmap_count[i] != 1) in flush_all_zero_pkmaps()
181 pkmap_count[i] = 0; in flush_all_zero_pkmaps()
184 BUG_ON(pte_none(pkmap_page_table[i])); in flush_all_zero_pkmaps()
193 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
194 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); in flush_all_zero_pkmaps()
476 int i; in page_address_init() local
[all …]
zsmalloc.c
592 int i; in zs_stats_size_show() local
607 for (i = 0; i < ZS_SIZE_CLASSES; i++) { in zs_stats_size_show()
608 class = pool->size_class[i]; in zs_stats_size_show()
610 if (class->index != i) in zs_stats_size_show()
627 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
803 int i, max_usedpc = 0; in get_pages_per_zspage() local
807 for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { in get_pages_per_zspage()
811 zspage_size = i * PAGE_SIZE; in get_pages_per_zspage()
817 max_usedpc_order = i; in get_pages_per_zspage()
1030 int i; in create_page_chain() local
[all …]
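
get_pages_per_zspage() tries every candidate span from 1 to ZS_MAX_PAGES_PER_ZSPAGE pages and keeps the one whose size divides best by the class's object size, i.e. wastes the least. The arithmetic as a standalone sketch (constants chosen for illustration):

#include <stdio.h>

#define PAGE_SIZE		4096
#define ZS_MAX_PAGES_PER_ZSPAGE	4

/* Pick the page count whose total size is best used by class_size objects. */
static int pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0, best = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size = i * PAGE_SIZE;
		int waste = zspage_size % class_size;
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	/* A 2720-byte class fits poorly in one page but well in two. */
	printf("pages per zspage: %d\n", pages_per_zspage(2720));	/* 2 */
	return 0;
}
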
huge_memory.c
1214 int i; in do_huge_pmd_wp_page_fallback() local
1226 for (i = 0; i < HPAGE_PMD_NR; i++) { in do_huge_pmd_wp_page_fallback()
1227 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, in do_huge_pmd_wp_page_fallback()
1229 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1230 mem_cgroup_try_charge_delay(pages[i], vma->vm_mm, in do_huge_pmd_wp_page_fallback()
1232 if (pages[i]) in do_huge_pmd_wp_page_fallback()
1233 put_page(pages[i]); in do_huge_pmd_wp_page_fallback()
1234 while (--i >= 0) { in do_huge_pmd_wp_page_fallback()
1235 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
1236 set_page_private(pages[i], 0); in do_huge_pmd_wp_page_fallback()
[all …]
memcontrol.c
1155 int i; in __invalidate_reclaim_iterators() local
1159 for (i = 0; i <= DEF_PRIORITY; i++) { in __invalidate_reclaim_iterators()
1160 iter = &mz->iter[i]; in __invalidate_reclaim_iterators()
1383 int i; in memory_stat_format() local
1440 for (i = 0; i < NR_LRU_LISTS; i++) in memory_stat_format()
1441 seq_buf_printf(&s, "%s %llu\n", mem_cgroup_lru_names[i], in memory_stat_format()
1442 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * in memory_stat_format()
2302 int i; in memcg_hotplug_cpu_dead() local
2304 for (i = 0; i < MEMCG_NR_STAT; i++) { in memcg_hotplug_cpu_dead()
2308 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); in memcg_hotplug_cpu_dead()
[all …]
page_owner.c
107 unsigned int i; in check_recursive_alloc() local
109 for (i = 0; i < nr_entries; i++) { in check_recursive_alloc()
110 if (entries[i] == ip) in check_recursive_alloc()
144 int i; in __reset_page_owner() local
154 for (i = 0; i < (1 << order); i++) { in __reset_page_owner()
167 int i; in __set_page_owner_handle() local
169 for (i = 0; i < (1 << order); i++) { in __set_page_owner_handle()
209 int i; in __split_page_owner() local
216 for (i = 0; i < (1 << order); i++) { in __split_page_owner()
263 int i; in pagetypeinfo_showmixedcount_print() local
[all …]
page_poison.c
53 int i; in poison_pages() local
55 for (i = 0; i < n; i++) in poison_pages()
56 poison_page(page + i); in poison_pages()
112 int i; in unpoison_pages() local
114 for (i = 0; i < n; i++) in unpoison_pages()
115 unpoison_page(page + i); in unpoison_pages()
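
poison_pages()/unpoison_pages() apply one operation per page across n consecutive pages; the point of poisoning is to fill freed memory with a known byte so stray writes show up on reuse. A userspace sketch (POISON_BYTE is illustrative; the kernel's value is PAGE_POISON):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE   4096
#define POISON_BYTE 0xaa	/* illustrative; the kernel uses PAGE_POISON */

static void poison(void *p, int n)
{
	memset(p, POISON_BYTE, (size_t)n * PAGE_SIZE);
}

/* Return the offset of the first corrupted byte, or -1 if clean. */
static long check_poison(const unsigned char *p, int n)
{
	for (size_t i = 0; i < (size_t)n * PAGE_SIZE; i++)
		if (p[i] != POISON_BYTE)
			return (long)i;
	return -1;
}

int main(void)
{
	static unsigned char pages[2 * PAGE_SIZE];

	poison(pages, 2);
	pages[5000] = 0;	/* simulated use-after-free write */
	printf("first bad byte at %ld\n", check_poison(pages, 2));
	return 0;
}
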
gup_benchmark.c
26 unsigned long i, nr_pages, addr, next; in __gup_benchmark_ioctl() local
39 i = 0; in __gup_benchmark_ioctl()
55 pages + i); in __gup_benchmark_ioctl()
60 pages + i, NULL); in __gup_benchmark_ioctl()
63 nr = get_user_pages(addr, nr, gup->flags & 1, pages + i, in __gup_benchmark_ioctl()
74 i += nr; in __gup_benchmark_ioctl()
82 for (i = 0; i < nr_pages; i++) { in __gup_benchmark_ioctl()
83 if (!pages[i]) in __gup_benchmark_ioctl()
85 put_page(pages[i]); in __gup_benchmark_ioctl()
page_ext.c
76 int i; in invoke_need_callbacks() local
80 for (i = 0; i < entries; i++) { in invoke_need_callbacks()
81 if (page_ext_ops[i]->need && page_ext_ops[i]->need()) { in invoke_need_callbacks()
82 page_ext_ops[i]->offset = page_ext_size; in invoke_need_callbacks()
83 page_ext_size += page_ext_ops[i]->size; in invoke_need_callbacks()
93 int i; in invoke_init_callbacks() local
96 for (i = 0; i < entries; i++) { in invoke_init_callbacks()
97 if (page_ext_ops[i]->init) in invoke_init_callbacks()
98 page_ext_ops[i]->init(); in invoke_init_callbacks()
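
invoke_need_callbacks() lays the page_ext area out like a struct packed at boot: each client whose need() returns true is assigned the current running size as its offset, and the total grows by its size; invoke_init_callbacks() then lets each client initialize. A sketch of the layout pass, with the ops table and sizes invented here:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ext_ops {
	const char *name;
	size_t size;
	bool (*need)(void);
	size_t offset;	/* filled in by the layout pass */
};

static bool yes(void) { return true; }
static bool no(void)  { return false; }

static struct ext_ops ops[] = {
	{ "owner",   24, yes },
	{ "idle",     8, no  },
	{ "tagging", 16, yes },
};

int main(void)
{
	size_t total = sizeof(long);	/* stand-in for sizeof(struct page_ext) */

	for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		if (ops[i].need && ops[i].need()) {
			ops[i].offset = total;
			total += ops[i].size;
			printf("%s at offset %zu\n", ops[i].name, ops[i].offset);
		}
	}
	printf("total per-page ext size: %zu\n", total);
	return 0;
}
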
cma.c
102 unsigned i = cma->count >> pageblock_order; in cma_activate_area() local
131 } while (--i); in cma_activate_area()
151 int i; in cma_init_reserved_areas() local
153 for (i = 0; i < cma_area_count; i++) { in cma_init_reserved_areas()
154 int ret = cma_activate_area(&cma_areas[i]); in cma_init_reserved_areas()
426 size_t i; in cma_alloc() local
492 for (i = 0; i < count; i++) in cma_alloc()
493 page_kasan_tag_reset(page + i); in cma_alloc()
543 int i; in cma_for_each_area() local
545 for (i = 0; i < cma_area_count; i++) { in cma_for_each_area()
[all …]
page_alloc.c
691 int i; in prep_compound_page() local
697 for (i = 1; i < nr_pages; i++) { in prep_compound_page()
698 struct page *p = page + i; in prep_compound_page()
1127 int i; in kernel_init_free_pages() local
1129 for (i = 0; i < numpages; i++) in kernel_init_free_pages()
1130 clear_highpage(page + i); in kernel_init_free_pages()
1148 int i; in free_pages_prepare() local
1154 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1156 bad += free_tail_pages_check(page, page + i); in free_pages_prepare()
1157 if (unlikely(free_pages_check(page + i))) { in free_pages_prepare()
[all …]
swapfile.c
933 unsigned long offset, i; in swap_alloc_cluster() local
955 for (i = 0; i < SWAPFILE_CLUSTER; i++) in swap_alloc_cluster()
956 map[i] = SWAP_HAS_CACHE; in swap_alloc_cluster()
1332 unsigned int i, free_entries = 0; in put_swap_page() local
1344 for (i = 0; i < SWAPFILE_CLUSTER; i++) { in put_swap_page()
1345 val = map[i]; in put_swap_page()
1360 for (i = 0; i < size; i++, entry.val++) { in put_swap_page()
1361 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { in put_swap_page()
1364 if (i == size - 1) in put_swap_page()
1399 int i; in swapcache_free_entries() local
[all …]
vmacache.c
65 int i; in vmacache_find() local
72 for (i = 0; i < VMACACHE_SIZE; i++) { in vmacache_find()
98 int i; in vmacache_find_exact() local
105 for (i = 0; i < VMACACHE_SIZE; i++) { in vmacache_find_exact()
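
vmacache_find() and vmacache_find_exact() probe a tiny fixed-size per-thread cache by scanning all VMACACHE_SIZE slots; with only a handful of entries, the linear scan is cheaper than any indexing. A sketch of the containing-range lookup (struct vma is reduced to a bare range):

#include <stdio.h>

#define VMACACHE_SIZE 4

struct vma { unsigned long start, end; };

static struct vma *cache[VMACACHE_SIZE];

static struct vma *vmacache_find(unsigned long addr)
{
	for (int i = 0; i < VMACACHE_SIZE; i++) {
		struct vma *v = cache[i];

		if (v && v->start <= addr && addr < v->end)
			return v;
	}
	return NULL;	/* miss: caller falls back to the full lookup */
}

int main(void)
{
	struct vma v = { 0x1000, 0x3000 };

	cache[2] = &v;
	printf("%s\n", vmacache_find(0x2000) ? "hit" : "miss");
	return 0;
}
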
/mm/kasan/
init.c
300 int i; in kasan_free_pte() local
302 for (i = 0; i < PTRS_PER_PTE; i++) { in kasan_free_pte()
303 pte = pte_start + i; in kasan_free_pte()
315 int i; in kasan_free_pmd() local
317 for (i = 0; i < PTRS_PER_PMD; i++) { in kasan_free_pmd()
318 pmd = pmd_start + i; in kasan_free_pmd()
330 int i; in kasan_free_pud() local
332 for (i = 0; i < PTRS_PER_PUD; i++) { in kasan_free_pud()
333 pud = pud_start + i; in kasan_free_pud()
345 int i; in kasan_free_p4d() local
[all …]
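
The kasan_free_pte/pmd/pud/p4d helpers all share one shape: scan every slot of a page-table level and free the backing page only if all entries turned out to be empty. A generic userspace sketch (free_if_empty() is a made-up name):

#include <stdbool.h>
#include <stdlib.h>

#define PTRS_PER_TABLE 512	/* stands in for PTRS_PER_PTE etc. */

/* Free the table only when every entry is empty, the shape shared by
 * kasan_free_pte() and the higher-level helpers. */
static bool free_if_empty(unsigned long **table)
{
	for (int i = 0; i < PTRS_PER_TABLE; i++)
		if ((*table)[i])
			return false;	/* an entry is still live */
	free(*table);
	*table = NULL;
	return true;
}

int main(void)
{
	unsigned long *t = calloc(PTRS_PER_TABLE, sizeof(*t));

	return (t && free_if_empty(&t)) ? 0 : 1;
}
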
