Searched refs:idx (Results 1 – 22 of 22) sorted by relevance

/mm/
hugetlb_cgroup.c
61 int idx; in hugetlb_cgroup_have_usage() local
63 for (idx = 0; idx < hugetlb_max_hstate; idx++) { in hugetlb_cgroup_have_usage()
64 if (page_counter_read(&h_cg->hugepage[idx])) in hugetlb_cgroup_have_usage()
73 int idx; in hugetlb_cgroup_init() local
75 for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) { in hugetlb_cgroup_init()
76 struct page_counter *counter = &h_cgroup->hugepage[idx]; in hugetlb_cgroup_init()
82 parent = &parent_h_cgroup->hugepage[idx]; in hugetlb_cgroup_init()
86 1 << huge_page_order(&hstates[idx])); in hugetlb_cgroup_init()
125 static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg, in hugetlb_cgroup_move_parent() argument
146 page_counter_charge(&parent->hugepage[idx], nr_pages); in hugetlb_cgroup_move_parent()
[all …]
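
Both hugetlb_cgroup hits iterate idx across the huge page sizes (hstates), treating hugepage[] as one page counter per size. A minimal sketch of the usage check, with struct page_counter reduced to a plain counter and the constants illustrative:

    #define HUGE_MAX_HSTATE 2

    struct page_counter { long usage; };    /* reduced stand-in */

    struct hugetlb_cgroup {
        struct page_counter hugepage[HUGE_MAX_HSTATE];
    };

    static int hugetlb_max_hstate;          /* hstates registered at runtime */

    static int have_usage(struct hugetlb_cgroup *h_cg)
    {
        int idx;

        /* Charged pages of any huge page size count as usage. */
        for (idx = 0; idx < hugetlb_max_hstate; idx++) {
            if (h_cg->hugepage[idx].usage)
                return 1;
        }
        return 0;
    }
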
memblock.c
549 int idx, phys_addr_t base, in memblock_insert_region() argument
554 struct memblock_region *rgn = &type->regions[idx]; in memblock_insert_region()
557 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); in memblock_insert_region()
589 int idx, nr_new; in memblock_add_range() local
614 for_each_memblock_type(idx, type, rgn) { in memblock_add_range()
633 memblock_insert_region(type, idx++, base, in memblock_add_range()
645 memblock_insert_region(type, idx, base, end - base, in memblock_add_range()
728 int idx; in memblock_isolate_range() local
741 for_each_memblock_type(idx, type, rgn) { in memblock_isolate_range()
758 memblock_insert_region(type, idx, rbase, base - rbase, in memblock_isolate_range()
[all …]
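
memblock_insert_region() makes room in a flat region array by memmove-ing the tail up one slot before writing the new entry at idx. A standalone sketch of that insertion (struct region, MAX_REGIONS, and the error handling here are simplified stand-ins, not the kernel's):

    #include <string.h>

    #define MAX_REGIONS 128

    struct region { unsigned long base, size; };

    static struct region regions[MAX_REGIONS];
    static int region_cnt;

    /* Shift regions[idx..cnt-1] up one slot; write the new entry at idx. */
    static int region_insert(int idx, unsigned long base, unsigned long size)
    {
        struct region *rgn = &regions[idx];

        if (region_cnt >= MAX_REGIONS)
            return -1;               /* full; memblock would grow the array */

        memmove(rgn + 1, rgn, (region_cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        region_cnt++;
        return 0;
    }
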
swap_cgroup.c
43 unsigned long idx, max; in swap_cgroup_prepare() local
47 for (idx = 0; idx < ctrl->length; idx++) { in swap_cgroup_prepare()
51 ctrl->map[idx] = page; in swap_cgroup_prepare()
53 if (!(idx % SWAP_CLUSTER_MAX)) in swap_cgroup_prepare()
58 max = idx; in swap_cgroup_prepare()
59 for (idx = 0; idx < max; idx++) in swap_cgroup_prepare()
60 __free_page(ctrl->map[idx]); in swap_cgroup_prepare()
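
swap_cgroup_prepare() uses the classic allocate-then-unwind shape: if allocation fails at position idx, a second loop frees exactly the slots [0, idx) that were filled. The same shape in userspace, with malloc() standing in for alloc_page():

    #include <stdlib.h>

    /* Fill map[0..length) with page-sized buffers; undo on failure. */
    static int map_prepare(void **map, unsigned long length)
    {
        unsigned long idx, max;

        for (idx = 0; idx < length; idx++) {
            void *page = malloc(4096);
            if (!page)
                goto not_enough_page;
            map[idx] = page;
        }
        return 0;

    not_enough_page:
        max = idx;                    /* free only the slots already filled */
        for (idx = 0; idx < max; idx++)
            free(map[idx]);
        return -1;
    }
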
early_ioremap.c
57 static inline void __init __late_set_fixmap(enum fixed_addresses idx, in __late_set_fixmap() argument
65 static inline void __init __late_clear_fixmap(enum fixed_addresses idx) in __late_clear_fixmap() argument
111 enum fixed_addresses idx; in __early_ioremap() local
151 idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; in __early_ioremap()
154 __late_set_fixmap(idx, phys_addr, prot); in __early_ioremap()
156 __early_set_fixmap(idx, phys_addr, prot); in __early_ioremap()
158 --idx; in __early_ioremap()
173 enum fixed_addresses idx; in early_iounmap() local
203 idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; in early_iounmap()
206 __late_clear_fixmap(idx); in early_iounmap()
[all …]
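
__early_ioremap() turns a slot number into a fixmap index with idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot, then decrements idx once per mapped page, since fixmap indices count downward in the address space. The index arithmetic in isolation (constants and set_fixmap() are illustrative stubs):

    #define PAGE_SIZE       4096
    #define NR_FIX_BTMAPS   64
    #define FIX_BTMAP_BEGIN 447       /* top index of the btmap area */

    static void set_fixmap(unsigned int idx, unsigned long phys)
    {
        (void)idx; (void)phys;        /* would install a fixmap PTE */
    }

    static void map_slot(unsigned int slot, unsigned long phys_addr,
                         unsigned int nrpages)
    {
        unsigned int idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * slot;

        while (nrpages > 0) {
            set_fixmap(idx, phys_addr);   /* one fixmap entry per page */
            phys_addr += PAGE_SIZE;
            --idx;                        /* fixmap indices count down */
            --nrpages;
        }
    }
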
vmacache.c
64 int idx = VMACACHE_HASH(addr); in vmacache_find() local
73 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find()
85 if (++idx == VMACACHE_SIZE) in vmacache_find()
86 idx = 0; in vmacache_find()
97 int idx = VMACACHE_HASH(start); in vmacache_find_exact() local
106 struct vm_area_struct *vma = current->vmacache.vmas[idx]; in vmacache_find_exact()
112 if (++idx == VMACACHE_SIZE) in vmacache_find_exact()
113 idx = 0; in vmacache_find_exact()
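
vmacache_find() and vmacache_find_exact() hash the address into a small fixed array, then probe every slot circularly, wrapping idx back to 0 past the end. A generic sketch of that probe (CACHE_SIZE, the hash, and the entry type are simplified assumptions):

    #define CACHE_SIZE 4
    #define CACHE_HASH(key) ((key) % CACHE_SIZE)

    struct entry { unsigned long key; void *val; };

    static void *cache_find(struct entry cache[CACHE_SIZE], unsigned long key)
    {
        int idx = CACHE_HASH(key);
        int i;

        /* Start at the hashed slot, then probe the remaining slots. */
        for (i = 0; i < CACHE_SIZE; i++) {
            struct entry *e = &cache[idx];

            if (e->val && e->key == key)
                return e->val;
            if (++idx == CACHE_SIZE)
                idx = 0;              /* wrap around, as vmacache does */
        }
        return NULL;
    }
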
swapfile.c
402 unsigned int idx) in cluster_list_add_tail() argument
405 cluster_set_next_flag(&list->head, idx, 0); in cluster_list_add_tail()
406 cluster_set_next_flag(&list->tail, idx, 0); in cluster_list_add_tail()
417 cluster_set_next(ci_tail, idx); in cluster_list_add_tail()
419 cluster_set_next_flag(&list->tail, idx, 0); in cluster_list_add_tail()
426 unsigned int idx; in cluster_list_del_first() local
428 idx = cluster_next(&list->head); in cluster_list_del_first()
429 if (cluster_next(&list->tail) == idx) { in cluster_list_del_first()
434 cluster_next(&ci[idx]), 0); in cluster_list_del_first()
436 return idx; in cluster_list_del_first()
[all …]
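
The swap cluster list is threaded by array index rather than by pointer: head and tail hold cluster indices, and each cluster's next field names its successor. A reduced index-linked list sketch (field and constant names are illustrative; flag handling is dropped):

    #define NIL 0xFFFFFFFFu

    struct cluster { unsigned int next; };

    struct cluster_list { unsigned int head, tail; };

    static void list_add_tail(struct cluster_list *list,
                              struct cluster *ci, unsigned int idx)
    {
        ci[idx].next = NIL;
        if (list->head == NIL) {
            list->head = list->tail = idx;    /* first element */
        } else {
            ci[list->tail].next = idx;        /* old tail links to idx */
            list->tail = idx;
        }
    }

    /* Caller ensures the list is non-empty. */
    static unsigned int list_del_first(struct cluster_list *list,
                                       struct cluster *ci)
    {
        unsigned int idx = list->head;

        if (list->tail == idx)                /* removing the only element */
            list->head = list->tail = NIL;
        else
            list->head = ci[idx].next;
        return idx;
    }
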
hugetlb.c
1990 pgoff_t idx; in __vma_reservation_common() local
1997 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2000 ret = region_chg(resv, idx, idx + 1); in __vma_reservation_common()
2003 ret = region_add(resv, idx, idx + 1); in __vma_reservation_common()
2006 region_abort(resv, idx, idx + 1); in __vma_reservation_common()
2011 ret = region_add(resv, idx, idx + 1); in __vma_reservation_common()
2013 region_abort(resv, idx, idx + 1); in __vma_reservation_common()
2014 ret = region_del(resv, idx, idx + 1); in __vma_reservation_common()
2122 int ret, idx; in alloc_huge_page() local
2125 idx = hstate_index(h); in alloc_huge_page()
[all …]
slab_common.c
648 int idx; in memcg_create_kmem_cache() local
662 idx = memcg_cache_id(memcg); in memcg_create_kmem_cache()
671 if (arr->entries[idx]) in memcg_create_kmem_cache()
701 arr->entries[idx] = s; in memcg_create_kmem_cache()
795 int idx; in memcg_deactivate_kmem_caches() local
800 idx = memcg_cache_id(memcg); in memcg_deactivate_kmem_caches()
809 c = arr->entries[idx]; in memcg_deactivate_kmem_caches()
814 arr->entries[idx] = NULL; in memcg_deactivate_kmem_caches()
1229 int idx = 0; in kmalloc_cache_name() local
1233 idx++; in kmalloc_cache_name()
[all …]
memtest.c
103 unsigned int idx = 0; in early_memtest() local
110 idx = i % ARRAY_SIZE(patterns); in early_memtest()
111 do_one_pass(patterns[idx], start, end); in early_memtest()
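
early_memtest() cycles through a fixed pattern table with idx = i % ARRAY_SIZE(patterns), so any number of passes reuses the table round-robin. The idiom in miniature (pattern values and the do_one_pass() body are placeholders):

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned long long patterns[] = {
        0x0000000000000000ULL, 0xffffffffffffffffULL,
        0x5555555555555555ULL, 0xaaaaaaaaaaaaaaaaULL,
    };

    static void do_one_pass(unsigned long long pattern)
    {
        (void)pattern;            /* would write and verify the pattern */
    }

    static void run_memtest(unsigned int passes)
    {
        unsigned int i, idx;

        for (i = 0; i < passes; i++) {
            idx = i % ARRAY_SIZE(patterns);   /* cycle through the table */
            do_one_pass(patterns[idx]);
        }
    }
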
swap_state.c
117 pgoff_t idx = swp_offset(entry); in add_to_swap_cache() local
118 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
134 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
162 pgoff_t idx = swp_offset(entry); in __delete_from_swap_cache() local
163 XA_STATE(xas, &address_space->i_pages, idx); in __delete_from_swap_cache()
list_lru.c
46 list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) in list_lru_from_memcg_idx() argument
55 if (memcg_lrus && idx >= 0) in list_lru_from_memcg_idx()
56 return memcg_lrus->lru[idx]; in list_lru_from_memcg_idx()
110 list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx) in list_lru_from_memcg_idx() argument
memcontrol.c
689 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) in __mod_memcg_state() argument
696 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); in __mod_memcg_state()
704 __this_cpu_add(memcg->vmstats_local->stat[idx], x); in __mod_memcg_state()
706 atomic_long_add(x, &mi->vmstats[idx]); in __mod_memcg_state()
709 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); in __mod_memcg_state()
733 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, in __mod_lruvec_state() argument
742 __mod_node_page_state(pgdat, idx, val); in __mod_lruvec_state()
751 __mod_memcg_state(memcg, idx, val); in __mod_lruvec_state()
754 __this_cpu_add(pn->lruvec_stat_local->count[idx], val); in __mod_lruvec_state()
756 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); in __mod_lruvec_state()
[all …]
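
__mod_memcg_state() batches small deltas in a per-CPU counter and folds them into the shared atomic only once they exceed a threshold, keeping hot-path updates off the shared cache line. A single-variable sketch of that batching, with _Thread_local standing in for the kernel's per-CPU accessors and the threshold made up:

    #include <stdatomic.h>
    #include <stdlib.h>

    #define CHARGE_BATCH 64           /* flush threshold, illustrative */

    static _Thread_local long percpu_stat;   /* stands in for __this_cpu_* */
    static atomic_long shared_stat;

    static void mod_state(long val)
    {
        long x = val + percpu_stat;

        if (labs(x) > CHARGE_BATCH) {
            atomic_fetch_add(&shared_stat, x);   /* fold batch into total */
            x = 0;
        }
        percpu_stat = x;              /* remainder stays CPU-local */
    }
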
userfaultfd.c
189 pgoff_t idx; in __mcopy_atomic_hugetlb() local
270 idx = linear_page_index(dst_vma, dst_addr); in __mcopy_atomic_hugetlb()
272 hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr); in __mcopy_atomic_hugetlb()
z3fold.c
377 int idx = 0; in __encode_handle() local
387 idx = __idx(zhdr, bud); in __encode_handle()
388 h += idx; in __encode_handle()
392 slots->slot[idx] = h; in __encode_handle()
393 return (unsigned long)&slots->slot[idx]; in __encode_handle()
zsmalloc.c
541 int idx = 0; in get_size_class_index() local
544 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
547 return min_t(int, ZS_SIZE_CLASSES - 1, idx); in get_size_class_index()
1897 int idx = 0; in replace_sub_page() local
1902 pages[idx] = newpage; in replace_sub_page()
1904 pages[idx] = page; in replace_sub_page()
1905 idx++; in replace_sub_page()
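
get_size_class_index() maps an allocation size to one of the zsmalloc size classes by rounding up its distance from the minimum size, then clamping to the last class. A worked sketch (constants here are illustrative, not zsmalloc's real values):

    #define ZS_MIN_ALLOC_SIZE   32
    #define ZS_SIZE_CLASS_DELTA 16
    #define ZS_SIZE_CLASSES     255

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MIN(a, b)          ((a) < (b) ? (a) : (b))

    static int get_size_class_index(int size)
    {
        int idx = 0;

        if (size > ZS_MIN_ALLOC_SIZE)
            idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
                               ZS_SIZE_CLASS_DELTA);

        /* e.g. size = 100: DIV_ROUND_UP(68, 16) = 5, well under the clamp */
        return MIN(ZS_SIZE_CLASSES - 1, idx);
    }
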
sparse.c
225 int idx = subsection_map_index(pfn); in subsection_mask_set() local
228 bitmap_set(map, idx, end - idx + 1); in subsection_mask_set()
slub.c
1554 unsigned int idx; in next_freelist_entry() local
1561 idx = s->random_seq[*pos]; in next_freelist_entry()
1565 } while (unlikely(idx >= page_limit)); in next_freelist_entry()
1567 return (char *)start + idx; in next_freelist_entry()
1576 unsigned long idx, pos, page_limit, freelist_count; in shuffle_freelist() local
1593 for (idx = 1; idx < page->objects; idx++) { in shuffle_freelist()
1622 int idx; in allocate_slab() local
1673 for (idx = 0, p = start; idx < page->objects - 1; idx++) { in allocate_slab()
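
next_freelist_entry() converts a precomputed random permutation (s->random_seq) into object addresses: fetch the next offset, retry while it falls past the page's object area, then add it to the slab's start. A sketch of that pick-and-retry step with simplified types:

    struct shuffle_seq {
        unsigned int *random_seq;     /* precomputed permutation of offsets */
        unsigned long count;
    };

    static void *next_entry(struct shuffle_seq *s, void *start,
                            unsigned long *pos, unsigned long page_limit)
    {
        unsigned int idx;

        /* Skip sequence values that fall outside this page's objects. */
        do {
            idx = s->random_seq[*pos];
            *pos = (*pos + 1) % s->count;
        } while (idx >= page_limit);

        return (char *)start + idx;
    }
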
page_alloc.c
6724 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
6729 zone->name = zone_names[idx]; in zone_init_internals()
7711 enum zone_type j, idx; in setup_per_zone_lowmem_reserve() local
7720 idx = j; in setup_per_zone_lowmem_reserve()
7721 while (idx) { in setup_per_zone_lowmem_reserve()
7724 idx--; in setup_per_zone_lowmem_reserve()
7725 lower_zone = pgdat->node_zones + idx; in setup_per_zone_lowmem_reserve()
7727 if (sysctl_lowmem_reserve_ratio[idx] < 1) { in setup_per_zone_lowmem_reserve()
7728 sysctl_lowmem_reserve_ratio[idx] = 0; in setup_per_zone_lowmem_reserve()
7732 managed_pages / sysctl_lowmem_reserve_ratio[idx]; in setup_per_zone_lowmem_reserve()
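
setup_per_zone_lowmem_reserve() walks idx downward from each zone j, giving every lower zone a reserve derived from the managed pages above it, with a ratio below 1 disabling that reserve. A reduced sketch of the descending walk (the kernel additionally accumulates managed pages zone by zone, which is elided here):

    /* For zone j, set a reserve in every lower zone idx < j. */
    static void setup_reserves(long *reserve, long *ratio,
                               long managed_pages, int j)
    {
        int idx = j;

        while (idx) {
            idx--;
            if (ratio[idx] < 1)
                reserve[idx] = 0;     /* ratio below 1 disables the reserve */
            else
                reserve[idx] = managed_pages / ratio[idx];
        }
    }
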
slab.c
375 unsigned int idx) in index_to_obj() argument
377 return page->s_mem + cache->size * idx; in index_to_obj()
2320 static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx) in get_free_obj() argument
2322 return ((freelist_idx_t *)page->freelist)[idx]; in get_free_obj()
2326 unsigned int idx, freelist_idx_t val) in set_free_obj() argument
2328 ((freelist_idx_t *)(page->freelist))[idx] = val; in set_free_obj()
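
index_to_obj() and get/set_free_obj() show SLAB's two uses of idx: an object lives at s_mem + size * idx, while the freelist is simply an array of such indices stored in the page. In miniature, with simplified types:

    typedef unsigned short freelist_idx_t;

    struct slab {
        void *s_mem;                  /* address of the first object */
        freelist_idx_t *freelist;     /* array of free object indices */
        unsigned int size;            /* object stride, padding included */
    };

    static void *index_to_obj(struct slab *slab, unsigned int idx)
    {
        return (char *)slab->s_mem + slab->size * idx;
    }

    static freelist_idx_t get_free_obj(struct slab *slab, unsigned int idx)
    {
        return slab->freelist[idx];
    }

    static void set_free_obj(struct slab *slab, unsigned int idx,
                             freelist_idx_t val)
    {
        slab->freelist[idx] = val;
    }
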
memory.c
4499 void (*process_subpage)(unsigned long addr, int idx, void *arg), in process_huge_page() argument
4558 static void clear_subpage(unsigned long addr, int idx, void *arg) in clear_subpage() argument
4562 clear_user_highpage(page + idx, addr); in clear_subpage()
4604 static void copy_subpage(unsigned long addr, int idx, void *arg) in copy_subpage() argument
4608 copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx, in copy_subpage()
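
process_huge_page() hides per-subpage work behind a callback taking (addr, idx, arg); clear_subpage() and copy_subpage() then use page + idx to reach the idx-th constituent page. A linear sketch of that callback scheme (the kernel also reorders subpages for cache locality, which is omitted; memcpy stands in for copy_user_highpage()):

    #include <string.h>

    #define SUBPAGE_SIZE 4096

    struct copy_arg { char **dst, **src; };   /* per-subpage buffers */

    static void copy_subpage(unsigned long addr, int idx, void *arg)
    {
        struct copy_arg *a = arg;

        memcpy(a->dst[idx], a->src[idx], SUBPAGE_SIZE);
        (void)addr;
    }

    static void process_pages(unsigned long base, int nr_subpages,
                              void (*process_subpage)(unsigned long, int, void *),
                              void *arg)
    {
        int idx;

        for (idx = 0; idx < nr_subpages; idx++)
            process_subpage(base + (unsigned long)idx * SUBPAGE_SIZE,
                            idx, arg);
    }
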
mempolicy.c
2346 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx) in mpol_shared_policy_lookup() argument
2354 sn = sp_lookup(sp, idx, idx+1); in mpol_shared_policy_lookup()
/mm/kasan/
common.c
322 u8 idx = 0; in kasan_set_free_info() local
327 idx = alloc_meta->free_track_idx; in kasan_set_free_info()
328 alloc_meta->free_pointer_tag[idx] = tag; in kasan_set_free_info()
329 alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS; in kasan_set_free_info()
332 set_track(&alloc_meta->free_track[idx], GFP_NOWAIT); in kasan_set_free_info()
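
kasan_set_free_info() keeps free stacks in a small ring buffer: idx names the slot to overwrite, then advances modulo KASAN_NR_FREE_STACKS so the newest free evicts the oldest record. The ring-advance idiom on its own (constants and types are illustrative):

    #define NR_FREE_STACKS 5          /* ring size, illustrative */

    struct free_info {
        unsigned long free_track[NR_FREE_STACKS];
        unsigned char free_track_idx;
    };

    static void record_free(struct free_info *info, unsigned long stack)
    {
        unsigned char idx = info->free_track_idx;

        info->free_track[idx] = stack;                  /* evict the oldest */
        info->free_track_idx = (idx + 1) % NR_FREE_STACKS;
    }
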