Searched for refs:i — results 1 – 25 of 63, sorted by relevance


/mm/
list_lru.c
293 int i; in __memcg_destroy_list_lru_node() local
295 for (i = begin; i < end; i++) in __memcg_destroy_list_lru_node()
296 kfree(memcg_lrus->lru[i]); in __memcg_destroy_list_lru_node()
302 int i; in __memcg_init_list_lru_node() local
304 for (i = begin; i < end; i++) { in __memcg_init_list_lru_node()
312 memcg_lrus->lru[i] = l; in __memcg_init_list_lru_node()
316 __memcg_destroy_list_lru_node(memcg_lrus, begin, i); in __memcg_init_list_lru_node()
386 int i; in memcg_init_list_lru() local
393 for_each_node(i) { in memcg_init_list_lru()
394 if (memcg_init_list_lru_node(&lru->node[i])) in memcg_init_list_lru()
[all …]
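The init/destroy pair above is the classic partial-initialization rollback: when allocation fails at index i, __memcg_init_list_lru_node() destroys exactly the entries [begin, i) it had already set up. A minimal userspace sketch of that idiom, with hypothetical names (struct table, init_range, destroy_range) in place of the memcg_lrus structures:

#include <stdlib.h>

struct table { void *slot[16]; };

static void destroy_range(struct table *t, int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                free(t->slot[i]);
}

/* caller guarantees 0 <= begin <= end <= 16 */
static int init_range(struct table *t, int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                void *p = malloc(64);

                if (!p) {
                        destroy_range(t, begin, i); /* undo [begin, i) only */
                        return -1;
                }
                t->slot[i] = p;
        }
        return 0;
}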
early_ioremap.c
69 int i; in early_ioremap_setup() local
71 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
72 if (WARN_ON(prev_map[i])) in early_ioremap_setup()
75 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in early_ioremap_setup()
76 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); in early_ioremap_setup()
82 int i; in check_early_ioremap_leak() local
84 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) in check_early_ioremap_leak()
85 if (prev_map[i]) in check_early_ioremap_leak()
104 int i, slot; in __early_ioremap() local
109 for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { in __early_ioremap()
[all …]
percpu-vm.c
59 int i; in pcpu_free_pages() local
62 for (i = page_start; i < page_end; i++) { in pcpu_free_pages()
63 struct page *page = pages[pcpu_page_idx(cpu, i)]; in pcpu_free_pages()
87 int i; in pcpu_alloc_pages() local
90 for (i = page_start; i < page_end; i++) { in pcpu_alloc_pages()
91 struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; in pcpu_alloc_pages()
101 while (--i >= page_start) in pcpu_alloc_pages()
102 __free_page(pages[pcpu_page_idx(cpu, i)]); in pcpu_alloc_pages()
107 for (i = page_start; i < page_end; i++) in pcpu_alloc_pages()
108 __free_page(pages[pcpu_page_idx(tcpu, i)]); in pcpu_alloc_pages()
[all …]
vmstat.c
40 int i; in sum_vm_events() local
47 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) in sum_vm_events()
48 ret[i] += this->event[i]; in sum_vm_events()
74 int i; in vm_events_fold_cpu() local
76 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { in vm_events_fold_cpu()
77 count_vm_events(i, fold_state->event[i]); in vm_events_fold_cpu()
78 fold_state->event[i] = 0; in vm_events_fold_cpu()
202 int i; in set_pgdat_percpu_threshold() local
204 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
205 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
[all …]
frame_vector.c
108 int i; in put_vaddr_frames() local
121 for (i = 0; i < vec->nr_frames; i++) in put_vaddr_frames()
122 put_page(pages[i]); in put_vaddr_frames()
139 int i; in frame_vector_to_pages() local
146 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
147 if (!pfn_valid(nums[i])) in frame_vector_to_pages()
150 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pages()
151 pages[i] = pfn_to_page(nums[i]); in frame_vector_to_pages()
165 int i; in frame_vector_to_pfns() local
173 for (i = 0; i < vec->nr_frames; i++) in frame_vector_to_pfns()
[all …]
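frame_vector_to_pages() above runs two passes over the vector: the first only validates (pfn_valid), the second only converts (pfn_to_page), so a bad entry is caught before anything is rewritten. A sketch of that validate-then-commit shape, with stand-ins assumed for the pfn helpers:

#include <stdbool.h>
#include <stddef.h>

/* stand-in for pfn_valid(): treat 0 as the one invalid frame number */
static bool frame_valid(unsigned long pfn)
{
        return pfn != 0;
}

static int frames_to_pages(const unsigned long *nums, void **pages, size_t nr)
{
        size_t i;

        for (i = 0; i < nr; i++)        /* pass 1: validate everything */
                if (!frame_valid(nums[i]))
                        return -1;
        for (i = 0; i < nr; i++)        /* pass 2: convert, cannot fail */
                pages[i] = (void *)(nums[i] << 12); /* pfn_to_page stand-in */
        return 0;
}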
zsmalloc.c
500 int i; in zs_stats_size_show() local
514 for (i = 0; i < zs_size_classes; i++) { in zs_stats_size_show()
515 class = pool->size_class[i]; in zs_stats_size_show()
517 if (class->index != i) in zs_stats_size_show()
533 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
750 int i, max_usedpc = 0; in get_pages_per_zspage() local
754 for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { in get_pages_per_zspage()
758 zspage_size = i * PAGE_SIZE; in get_pages_per_zspage()
764 max_usedpc_order = i; in get_pages_per_zspage()
921 unsigned int i = 1; in init_zspage() local
[all …]
memblock.c
97 unsigned long i; in memblock_overlaps_region() local
99 for (i = 0; i < type->cnt; i++) { in memblock_overlaps_region()
100 phys_addr_t rgnbase = type->regions[i].base; in memblock_overlaps_region()
101 phys_addr_t rgnsize = type->regions[i].size; in memblock_overlaps_region()
106 return i < type->cnt; in memblock_overlaps_region()
129 u64 i; in __memblock_find_range_bottom_up() local
131 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { in __memblock_find_range_bottom_up()
163 u64 i; in __memblock_find_range_top_down() local
165 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, in __memblock_find_range_top_down()
419 int i = 0; in memblock_merge_regions() local
[all …]
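memblock_overlaps_region() shows the scan-and-test idiom: break out of the loop at the first overlapping region, then report the result as `return i < type->cnt;`, i.e. "did the loop stop early?". A sketch with a hypothetical struct range; the overlap comparison itself is elided from the excerpt, so the standard half-open interval test is assumed here:

#include <stdbool.h>

struct range { unsigned long base, size; };

static bool overlaps(const struct range *r, unsigned long n,
                     unsigned long base, unsigned long size)
{
        unsigned long i;

        for (i = 0; i < n; i++) {
                /* assumed test: [base, base+size) intersects region i */
                if (base < r[i].base + r[i].size && r[i].base < base + size)
                        break;
        }
        return i < n;   /* true iff the loop broke early */
}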
slab_common.c
109 size_t i; in __kmem_cache_free_bulk() local
111 for (i = 0; i < nr; i++) in __kmem_cache_free_bulk()
112 kmem_cache_free(s, p[i]); in __kmem_cache_free_bulk()
118 size_t i; in __kmem_cache_alloc_bulk() local
120 for (i = 0; i < nr; i++) { in __kmem_cache_alloc_bulk()
121 void *x = p[i] = kmem_cache_alloc(s, flags); in __kmem_cache_alloc_bulk()
123 __kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
127 return i; in __kmem_cache_alloc_bulk()
642 int i; in shutdown_memcg_caches() local
652 for_each_memcg_cache_index(i) { in shutdown_memcg_caches()
[all …]
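__kmem_cache_alloc_bulk() above allocates nr objects into p[], and on the first failure passes i (the count already allocated) to __kmem_cache_free_bulk() and reports zero; on success it returns i == nr. A userspace sketch of that contract using plain malloc/free:

#include <stdlib.h>
#include <stddef.h>

static void bulk_free(size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++)
                free(p[i]);
}

static size_t bulk_alloc(size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                void *x = p[i] = malloc(32);

                if (!x) {
                        bulk_free(i, p);        /* undo the partial batch */
                        return 0;
                }
        }
        return i;       /* == nr on success */
}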
percpu.c
333 static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i) in pcpu_count_occupied_pages() argument
335 int off = chunk->map[i] & ~1; in pcpu_count_occupied_pages()
336 int end = chunk->map[i + 1] & ~1; in pcpu_count_occupied_pages()
338 if (!PAGE_ALIGNED(off) && i > 0) { in pcpu_count_occupied_pages()
339 int prev = chunk->map[i - 1]; in pcpu_count_occupied_pages()
345 if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) { in pcpu_count_occupied_pages()
346 int next = chunk->map[i + 1]; in pcpu_count_occupied_pages()
347 int nend = chunk->map[i + 2] & ~1; in pcpu_count_occupied_pages()
559 int i, off; in pcpu_alloc_area() local
563 for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) { in pcpu_alloc_area()
[all …]
highmem.c
159 int i = PKMAP_NR(addr); in kmap_to_page() local
160 return pte_page(pkmap_page_table[i]); in kmap_to_page()
169 int i; in flush_all_zero_pkmaps() local
174 for (i = 0; i < LAST_PKMAP; i++) { in flush_all_zero_pkmaps()
183 if (pkmap_count[i] != 1) in flush_all_zero_pkmaps()
185 pkmap_count[i] = 0; in flush_all_zero_pkmaps()
188 BUG_ON(pte_none(pkmap_page_table[i])); in flush_all_zero_pkmaps()
197 page = pte_page(pkmap_page_table[i]); in flush_all_zero_pkmaps()
198 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); in flush_all_zero_pkmaps()
480 int i; in page_address_init() local
[all …]
kmemcheck.c
12 int i; in kmemcheck_alloc_shadow() local
27 for(i = 0; i < pages; ++i) in kmemcheck_alloc_shadow()
28 page[i].shadow = page_address(&shadow[i]); in kmemcheck_alloc_shadow()
42 int i; in kmemcheck_free_shadow() local
53 for(i = 0; i < pages; ++i) in kmemcheck_free_shadow()
54 page[i].shadow = NULL; in kmemcheck_free_shadow()
truncate.c
228 int i; in truncate_inode_pages_range() local
260 for (i = 0; i < pagevec_count(&pvec); i++) { in truncate_inode_pages_range()
261 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
264 index = indices[i]; in truncate_inode_pages_range()
346 for (i = 0; i < pagevec_count(&pvec); i++) { in truncate_inode_pages_range()
347 struct page *page = pvec.pages[i]; in truncate_inode_pages_range()
350 index = indices[i]; in truncate_inode_pages_range()
466 int i; in invalidate_mapping_pages() local
472 for (i = 0; i < pagevec_count(&pvec); i++) { in invalidate_mapping_pages()
473 struct page *page = pvec.pages[i]; in invalidate_mapping_pages()
[all …]
vmacache.c
50 int i; in vmacache_find() local
57 for (i = 0; i < VMACACHE_SIZE; i++) { in vmacache_find()
58 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find()
78 int i; in vmacache_find_exact() local
85 for (i = 0; i < VMACACHE_SIZE; i++) { in vmacache_find_exact()
86 struct vm_area_struct *vma = current->vmacache[i]; in vmacache_find_exact()
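vmacache_find() and vmacache_find_exact() both do a plain linear scan over VMACACHE_SIZE per-task slots; with only a handful of entries, a loop with an early return is the whole lookup. A sketch with a hypothetical struct region in place of vm_area_struct:

#include <stddef.h>

#define CACHE_SLOTS 4   /* assumed small, like VMACACHE_SIZE */

struct region {
        unsigned long start, end;       /* covers [start, end) */
};

static struct region *cache[CACHE_SLOTS];

static struct region *cache_find(unsigned long addr)
{
        int i;

        for (i = 0; i < CACHE_SLOTS; i++) {
                struct region *r = cache[i];

                if (r && r->start <= addr && addr < r->end)
                        return r;
        }
        return NULL;    /* cache miss */
}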
slab.c
313 #define STATS_SET_FREEABLE(x, i) \ argument
315 if ((x)->max_freeable < i) \
316 (x)->max_freeable = i; \
333 #define STATS_SET_FREEABLE(x, i) do { } while (0) argument
711 int i; in __ac_get_obj() local
724 for (i = 0; i < ac->avail; i++) { in __ac_get_obj()
726 if (!is_obj_pfmemalloc(ac->entry[i])) { in __ac_get_obj()
727 objp = ac->entry[i]; in __ac_get_obj()
728 ac->entry[i] = ac->entry[ac->avail]; in __ac_get_obj()
874 int i; in alloc_alien_cache() local
[all …]
debug-pagealloc.c
73 int i; in poison_pages() local
75 for (i = 0; i < n; i++) in poison_pages()
76 poison_page(page + i); in poison_pages()
128 int i; in unpoison_pages() local
130 for (i = 0; i < n; i++) in unpoison_pages()
131 unpoison_page(page + i); in unpoison_pages()
page_alloc.c
509 int i; in prep_compound_page() local
515 for (i = 1; i < nr_pages; i++) { in prep_compound_page()
516 struct page *p = page + i; in prep_compound_page()
1028 int i, bad = 0; in free_pages_prepare() local
1040 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1042 bad += free_tail_pages_check(page, page + i); in free_pages_prepare()
1043 bad += free_pages_check(page + i); in free_pages_prepare()
1163 int i; in deferred_free_range() local
1176 for (i = 0; i < nr_pages; i++, page++, pfn++) in deferred_free_range()
1199 int i, zid; in deferred_init_memmap() local
[all …]
memory_hotplug.c
188 unsigned long *usemap, mapsize, section_nr, i; in register_page_bootmem_info_section() local
207 for (i = 0; i < mapsize; i++, page++) in register_page_bootmem_info_section()
215 for (i = 0; i < mapsize; i++, page++) in register_page_bootmem_info_section()
222 unsigned long *usemap, mapsize, section_nr, i; in register_page_bootmem_info_section() local
241 for (i = 0; i < mapsize; i++, page++) in register_page_bootmem_info_section()
248 unsigned long i, pfn, end_pfn, nr_pages; in register_page_bootmem_info_node() local
256 for (i = 0; i < nr_pages; i++, page++) in register_page_bootmem_info_node()
267 for (i = 0; i < nr_pages; i++, page++) in register_page_bootmem_info_node()
506 unsigned long i; in __add_pages() local
513 for (i = start_sec; i <= end_sec; i++) { in __add_pages()
[all …]
swap.c
422 int i; in pagevec_lru_move_fn() local
427 for (i = 0; i < pagevec_count(pvec); i++) { in pagevec_lru_move_fn()
428 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn()
571 int i; in __lru_cache_activate_page() local
583 for (i = pagevec_count(pvec) - 1; i >= 0; i--) { in __lru_cache_activate_page()
584 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
911 int i; in release_pages() local
918 for (i = 0; i < nr; i++) { in release_pages()
919 struct page *page = pages[i]; in release_pages()
1099 int i, j; in pagevec_remove_exceptionals() local
[all …]
huge_memory.c
1050 int ret = 0, i; in do_huge_pmd_wp_page_fallback() local
1062 for (i = 0; i < HPAGE_PMD_NR; i++) { in do_huge_pmd_wp_page_fallback()
1063 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | in do_huge_pmd_wp_page_fallback()
1066 if (unlikely(!pages[i] || in do_huge_pmd_wp_page_fallback()
1067 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, in do_huge_pmd_wp_page_fallback()
1069 if (pages[i]) in do_huge_pmd_wp_page_fallback()
1070 put_page(pages[i]); in do_huge_pmd_wp_page_fallback()
1071 while (--i >= 0) { in do_huge_pmd_wp_page_fallback()
1072 memcg = (void *)page_private(pages[i]); in do_huge_pmd_wp_page_fallback()
1073 set_page_private(pages[i], 0); in do_huge_pmd_wp_page_fallback()
[all …]
page_ext.c
72 int i; in invoke_need_callbacks() local
75 for (i = 0; i < entries; i++) { in invoke_need_callbacks()
76 if (page_ext_ops[i]->need && page_ext_ops[i]->need()) in invoke_need_callbacks()
85 int i; in invoke_init_callbacks() local
88 for (i = 0; i < entries; i++) { in invoke_init_callbacks()
89 if (page_ext_ops[i]->init) in invoke_init_callbacks()
90 page_ext_ops[i]->init(); in invoke_init_callbacks()
kmemleak-test.c
50 int i; in kmemleak_test_init() local
79 for (i = 0; i < 10; i++) { in kmemleak_test_init()
88 for_each_possible_cpu(i) { in kmemleak_test_init()
89 per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); in kmemleak_test_init()
91 per_cpu(kmemleak_test_pointer, i)); in kmemleak_test_init()
frontswap.c
117 unsigned int i; in frontswap_register_ops() local
130 for_each_set_bit(i, a, MAX_SWAPFILES) in frontswap_register_ops()
131 ops->init(i); in frontswap_register_ops()
156 for (i = 0; i < MAX_SWAPFILES; i++) { in frontswap_register_ops()
157 if (!test_bit(i, a) && test_bit(i, b)) in frontswap_register_ops()
158 ops->init(i); in frontswap_register_ops()
159 else if (test_bit(i, a) && !test_bit(i, b)) in frontswap_register_ops()
160 ops->invalidate_area(i); in frontswap_register_ops()
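frontswap_register_ops() above diffs two swapfile bitmaps: a bit set only in the new map gets ops->init(), a bit set only in the old map gets ops->invalidate_area(). A self-contained sketch of that diff, using a plain unsigned long where the kernel uses real bitmaps with test_bit()/for_each_set_bit():

#include <stdio.h>

#define NBITS 32        /* stand-in for MAX_SWAPFILES */

static int bit_is_set(unsigned int i, unsigned long map)
{
        return (map >> i) & 1UL;
}

static void diff_bitmaps(unsigned long before, unsigned long after)
{
        unsigned int i;

        for (i = 0; i < NBITS; i++) {
                if (!bit_is_set(i, before) && bit_is_set(i, after))
                        printf("init %u\n", i);         /* newly present */
                else if (bit_is_set(i, before) && !bit_is_set(i, after))
                        printf("invalidate %u\n", i);   /* went away */
        }
}

int main(void)
{
        diff_bitmaps(0x5UL, 0x6UL);     /* bit 0 invalidated, bit 1 inited */
        return 0;
}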
process_vm_access.c
154 unsigned long i; in process_vm_rw_core() local
165 for (i = 0; i < riovcnt; i++) { in process_vm_rw_core()
166 iov_len = rvec[i].iov_len; in process_vm_rw_core()
168 nr_pages_iov = ((unsigned long)rvec[i].iov_base in process_vm_rw_core()
170 / PAGE_SIZE - (unsigned long)rvec[i].iov_base in process_vm_rw_core()
213 for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++) in process_vm_rw_core()
215 (unsigned long)rvec[i].iov_base, rvec[i].iov_len, in process_vm_rw_core()
mincore.c
88 int i; in __mincore_unmapped_range() local
94 for (i = 0; i < nr; i++, pgoff++) in __mincore_unmapped_range()
95 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff); in __mincore_unmapped_range()
97 for (i = 0; i < nr; i++) in __mincore_unmapped_range()
98 vec[i] = 0; in __mincore_unmapped_range()
memtest.c
67 u64 i; in do_one_pass() local
70 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start, in do_one_pass()
101 unsigned int i; in early_memtest() local
108 for (i = memtest_pattern-1; i < UINT_MAX; --i) { in early_memtest()
109 idx = i % ARRAY_SIZE(patterns); in early_memtest()
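The early_memtest() loop is the unsigned count-down idiom: i walks from memtest_pattern-1 down to 0, and because an unsigned int wraps to UINT_MAX when decremented past zero, `i < UINT_MAX` is the termination test. A sketch showing why it stops:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned int n = 4;
        unsigned int i;

        /* prints 3, 2, 1, 0; after the i == 0 pass, --i wraps to
         * UINT_MAX and the condition fails. if n == 0, i starts at
         * UINT_MAX and the loop body never runs. */
        for (i = n - 1; i < UINT_MAX; --i)
                printf("pass %u\n", i);

        return 0;
}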
