/mm/
D | hugetlb_cgroup.c
      60  int idx;    in hugetlb_cgroup_have_usage() local
      62  for (idx = 0; idx < hugetlb_max_hstate; idx++) {    in hugetlb_cgroup_have_usage()
      63  if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)    in hugetlb_cgroup_have_usage()
      74  int idx;    in hugetlb_cgroup_css_alloc() local
      81  for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)    in hugetlb_cgroup_css_alloc()
      82  res_counter_init(&h_cgroup->hugepage[idx],    in hugetlb_cgroup_css_alloc()
      83  &parent_h_cgroup->hugepage[idx]);    in hugetlb_cgroup_css_alloc()
      86  for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)    in hugetlb_cgroup_css_alloc()
      87  res_counter_init(&h_cgroup->hugepage[idx], NULL);    in hugetlb_cgroup_css_alloc()
     108  static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,    in hugetlb_cgroup_move_parent() argument
      [all …]
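The hugetlb_cgroup matches above share one shape: a counter per hstate, initialized against the parent cgroup's counter when a parent exists and unparented otherwise, plus a usage check that scans every hstate. A minimal userspace sketch of that shape; the `counter` type, `MAX_HSTATE`, and the helper names are stand-ins, not the kernel's res_counter API:

```c
#include <stddef.h>
#include <stdint.h>

#define MAX_HSTATE 2                    /* stand-in for HUGE_MAX_HSTATE */

/* toy counter with an optional parent, loosely modelled on res_counter */
struct counter {
    uint64_t usage;
    struct counter *parent;
};

static void counter_init(struct counter *c, struct counter *parent)
{
    c->usage = 0;
    c->parent = parent;                 /* a real charge would also hit the parent */
}

struct hstate_group {
    struct counter hugepage[MAX_HSTATE];
};

/* one counter per hstate, parented only when a parent group exists */
static void group_init(struct hstate_group *g, struct hstate_group *parent)
{
    for (int idx = 0; idx < MAX_HSTATE; idx++)
        counter_init(&g->hugepage[idx],
                     parent ? &parent->hugepage[idx] : NULL);
}

/* any hstate with non-zero usage means the group is still in use */
static int group_have_usage(const struct hstate_group *g)
{
    for (int idx = 0; idx < MAX_HSTATE; idx++)
        if (g->hugepage[idx].usage > 0)
            return 1;
    return 0;
}
```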
D | early_ioremap.c
      48  static inline void __init __late_set_fixmap(enum fixed_addresses idx,    in __late_set_fixmap() argument
      56  static inline void __init __late_clear_fixmap(enum fixed_addresses idx)    in __late_clear_fixmap() argument
     102  enum fixed_addresses idx;    in __early_ioremap() local
     142  idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;    in __early_ioremap()
     145  __late_set_fixmap(idx, phys_addr, prot);    in __early_ioremap()
     147  __early_set_fixmap(idx, phys_addr, prot);    in __early_ioremap()
     149  --idx;    in __early_ioremap()
     164  enum fixed_addresses idx;    in early_iounmap() local
     194  idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;    in early_iounmap()
     197  __late_clear_fixmap(idx);    in early_iounmap()
      [all …]
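Both `__early_ioremap()` and `early_iounmap()` turn a slot number into a starting fixmap index with `FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * slot` and then step `idx` down by one per mapped page. A small sketch of just that index arithmetic, with made-up values for the two constants (the real ones depend on the architecture's fixmap layout):

```c
#include <stdio.h>

/* hypothetical values; the kernel derives these from the fixmap layout */
#define NR_FIX_BTMAPS    64
#define FIX_BTMAP_BEGIN  1023

int main(void)
{
    /* show which fixmap indices each slot's first few pages would occupy */
    for (int slot = 0; slot < 4; slot++) {
        int idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * slot;

        printf("slot %d: first idx %d", slot, idx);
        /* one fixmap entry per mapped page, walking downwards */
        for (int page = 0; page < 3; page++, --idx)
            printf(" [page %d -> idx %d]", page, idx);
        printf("\n");
    }
    return 0;
}
```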
D | bootmem.c
     188  unsigned long idx, vec;    in free_all_bootmem_core() local
     191  idx = start - bdata->node_min_pfn;    in free_all_bootmem_core()
     192  shift = idx & (BITS_PER_LONG - 1);    in free_all_bootmem_core()
     197  vec = ~map[idx / BITS_PER_LONG];    in free_all_bootmem_core()
     202  vec |= ~map[idx / BITS_PER_LONG + 1] <<    in free_all_bootmem_core()
     291  unsigned long idx;    in __free() local
     300  for (idx = sidx; idx < eidx; idx++)    in __free()
     301  if (!test_and_clear_bit(idx, bdata->node_bootmem_map))    in __free()
     308  unsigned long idx;    in __reserve() local
     317  for (idx = sidx; idx < eidx; idx++)    in __reserve()
      [all …]
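The bootmem matches show the usual bitmap decomposition: `idx / BITS_PER_LONG` selects the word and `idx & (BITS_PER_LONG - 1)` the bit, while `__free()`/`__reserve()` walk a `[sidx, eidx)` range with test-and-clear/set. A userspace sketch of the same split over an array of `unsigned long` (the helper names are mine, not the kernel bitops):

```c
#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* set bit idx, returning its previous value (cf. test_and_set_bit) */
static bool bitmap_test_and_set(unsigned long *map, unsigned long idx)
{
    unsigned long word = idx / BITS_PER_LONG;
    unsigned long bit  = idx & (BITS_PER_LONG - 1);
    bool old = (map[word] >> bit) & 1;

    map[word] |= 1UL << bit;
    return old;
}

/* clear bit idx, returning its previous value (cf. test_and_clear_bit) */
static bool bitmap_test_and_clear(unsigned long *map, unsigned long idx)
{
    unsigned long word = idx / BITS_PER_LONG;
    unsigned long bit  = idx & (BITS_PER_LONG - 1);
    bool old = (map[word] >> bit) & 1;

    map[word] &= ~(1UL << bit);
    return old;
}

/* reserve a [sidx, eidx) range the way __reserve() walks it */
static void reserve_range(unsigned long *map, unsigned long sidx,
                          unsigned long eidx)
{
    for (unsigned long idx = sidx; idx < eidx; idx++)
        bitmap_test_and_set(map, idx);
}
```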
D | memblock.c
     466  int idx, phys_addr_t base,    in memblock_insert_region() argument
     470  struct memblock_region *rgn = &type->regions[idx];    in memblock_insert_region()
     473  memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));    in memblock_insert_region()
     795  void __init_memblock __next_mem_range(u64 *idx, int nid,    in __next_mem_range() argument
     801  int idx_a = *idx & 0xffffffff;    in __next_mem_range()
     802  int idx_b = *idx >> 32;    in __next_mem_range()
     831  *idx = (u32)idx_a | (u64)idx_b << 32;    in __next_mem_range()
     869  *idx = (u32)idx_a | (u64)idx_b << 32;    in __next_mem_range()
     876  *idx = ULLONG_MAX;    in __next_mem_range()
     895  void __init_memblock __next_mem_range_rev(u64 *idx, int nid,    in __next_mem_range_rev() argument
      [all …]
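`__next_mem_range()` keeps two independent cursors, one into the memory region array and one into the reserved array, packed into a single `u64` iterator: the low 32 bits carry `idx_a`, the high 32 bits `idx_b`, and `ULLONG_MAX` marks the end of the walk. A self-contained sketch of that encoding (the function names here are illustrative, not memblock's API):

```c
#include <stdint.h>
#include <limits.h>
#include <stdio.h>

/* unpack the two cursors from one 64-bit iterator value */
static void unpack_idx(uint64_t idx, uint32_t *idx_a, uint32_t *idx_b)
{
    *idx_a = idx & 0xffffffffu;   /* low half: cursor into array A */
    *idx_b = idx >> 32;           /* high half: cursor into array B */
}

/* repack the advanced cursors so the caller can resume the walk */
static uint64_t pack_idx(uint32_t idx_a, uint32_t idx_b)
{
    return (uint64_t)idx_a | ((uint64_t)idx_b << 32);
}

int main(void)
{
    uint64_t it = 0;              /* start of the walk */
    uint32_t a, b;

    unpack_idx(it, &a, &b);
    a += 3;                       /* pretend 3 regions of A were consumed */
    b += 1;                       /* and 1 region of B */
    it = pack_idx(a, b);
    printf("iterator now %#llx\n", (unsigned long long)it);

    it = ULLONG_MAX;              /* memblock-style end-of-iteration marker */
    printf("end marker   %#llx\n", (unsigned long long)it);
    return 0;
}
```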
D | swapfile.c
     259  unsigned int idx)    in swap_cluster_schedule_discard() argument
     267  memset(si->swap_map + idx * SWAPFILE_CLUSTER,    in swap_cluster_schedule_discard()
     272  idx, 0);    in swap_cluster_schedule_discard()
     274  idx, 0);    in swap_cluster_schedule_discard()
     277  cluster_set_next(&si->cluster_info[tail], idx);    in swap_cluster_schedule_discard()
     279  idx, 0);    in swap_cluster_schedule_discard()
     292  unsigned int idx;    in swap_do_scheduled_discard() local
     297  idx = cluster_next(&si->discard_cluster_head);    in swap_do_scheduled_discard()
     300  cluster_next(&info[idx]), 0);    in swap_do_scheduled_discard()
     301  if (cluster_next(&si->discard_cluster_tail) == idx) {    in swap_do_scheduled_discard()
      [all …]
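`swap_cluster_schedule_discard()` links clusters by index rather than by pointer: each cluster's next field names another cluster, and a head/tail pair tracks the pending-discard list that `swap_do_scheduled_discard()` later drains. A minimal sketch of an index-linked queue of that shape; the names and the `NO_CLUSTER` sentinel are my own:

```c
#include <stdint.h>

#define NR_CLUSTERS 128
#define NO_CLUSTER  UINT32_MAX      /* sentinel: "no next cluster" */

struct cluster_queue {
    uint32_t next[NR_CLUSTERS];     /* per-cluster "next" index */
    uint32_t head, tail;            /* both NO_CLUSTER when the list is empty */
};

/* append cluster idx at the tail, as the discard path does */
static void queue_push(struct cluster_queue *q, uint32_t idx)
{
    q->next[idx] = NO_CLUSTER;
    if (q->head == NO_CLUSTER) {
        q->head = q->tail = idx;    /* first element */
    } else {
        q->next[q->tail] = idx;     /* cf. cluster_set_next(&info[tail], idx) */
        q->tail = idx;
    }
}

/* pop the head cluster, returning NO_CLUSTER when empty */
static uint32_t queue_pop(struct cluster_queue *q)
{
    uint32_t idx = q->head;

    if (idx == NO_CLUSTER)
        return NO_CLUSTER;
    q->head = q->next[idx];         /* cf. idx = cluster_next(&head) */
    if (q->head == NO_CLUSTER)
        q->tail = NO_CLUSTER;       /* list became empty */
    return idx;
}
```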
D | page_cgroup.c
     362  unsigned long idx, max;    in swap_cgroup_prepare() local
     366  for (idx = 0; idx < ctrl->length; idx++) {    in swap_cgroup_prepare()
     370  ctrl->map[idx] = page;    in swap_cgroup_prepare()
     372  if (!(idx % SWAP_CLUSTER_MAX))    in swap_cgroup_prepare()
     377  max = idx;    in swap_cgroup_prepare()
     378  for (idx = 0; idx < max; idx++)    in swap_cgroup_prepare()
     379  __free_page(ctrl->map[idx]);    in swap_cgroup_prepare()
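`swap_cgroup_prepare()` is an allocate-all-or-roll-back loop: it fills `ctrl->map[]` page by page and, on failure, remembers how far it got (`max = idx`) and frees exactly the entries already populated. A userspace sketch of the same error path using `malloc()`/`free()`:

```c
#include <stdlib.h>

/* allocate 'length' buffers of 'size' bytes each, or none at all */
static int prepare_map(void **map, unsigned long length, size_t size)
{
    unsigned long idx, max;

    for (idx = 0; idx < length; idx++) {
        void *p = malloc(size);

        if (!p)
            goto not_enough;        /* bail out, then undo partial work */
        map[idx] = p;
    }
    return 0;

not_enough:
    max = idx;                      /* only [0, max) were allocated */
    for (idx = 0; idx < max; idx++)
        free(map[idx]);
    return -1;
}
```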
D | hugetlb.c
    1361  pgoff_t idx;    in vma_needs_reservation() local
    1368  idx = vma_hugecache_offset(h, vma, addr);    in vma_needs_reservation()
    1369  chg = region_chg(resv, idx, idx + 1);    in vma_needs_reservation()
    1380  pgoff_t idx;    in vma_commit_reservation() local
    1386  idx = vma_hugecache_offset(h, vma, addr);    in vma_commit_reservation()
    1387  region_add(resv, idx, idx + 1);    in vma_commit_reservation()
    1397  int ret, idx;    in alloc_huge_page() local
    1400  idx = hstate_index(h);    in alloc_huge_page()
    1416  ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);    in alloc_huge_page()
    1432  hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);    in alloc_huge_page()
      [all …]
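The hugetlb matches split reservation into two phases: `vma_needs_reservation()` asks whether `[idx, idx + 1)` still needs a reservation before the allocation is attempted, and `vma_commit_reservation()` records it only once the page is in hand. A deliberately simplified sketch of that check-then-commit shape over a plain bitmap; the real code tracks regions keyed by file offset, which this does not model:

```c
#include <stdbool.h>
#include <stdint.h>

#define MAP_PAGES 1024

struct resv_map {
    bool reserved[MAP_PAGES];       /* toy stand-in for the region list */
};

/* phase 1: does index idx still need a reservation? (cf. region_chg) */
static int needs_reservation(const struct resv_map *resv, uint64_t idx)
{
    return resv->reserved[idx] ? 0 : 1;   /* pages to charge for [idx, idx+1) */
}

/* phase 2: record the reservation only after the allocation succeeded
 * (cf. region_add) */
static void commit_reservation(struct resv_map *resv, uint64_t idx)
{
    resv->reserved[idx] = true;
}
```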
D | slab.h
     202  cache_from_memcg_idx(struct kmem_cache *s, int idx)    in cache_from_memcg_idx() argument
     212  cachep = params->memcg_caches[idx];    in cache_from_memcg_idx()
     267  cache_from_memcg_idx(struct kmem_cache *s, int idx)    in cache_from_memcg_idx() argument
D | slab.c
     397  static void set_obj_status(struct page *page, int idx, int val)    in set_obj_status() argument
     405  status[idx] = val;    in set_obj_status()
     408  static inline unsigned int get_obj_status(struct page *page, int idx)    in get_obj_status() argument
     417  return status[idx];    in get_obj_status()
     421  static inline void set_obj_status(struct page *page, int idx, int val) {}    in set_obj_status() argument
     441  unsigned int idx)    in index_to_obj() argument
     443  return page->s_mem + cache->size * idx;    in index_to_obj()
    2467  static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)    in get_free_obj() argument
    2469  return ((freelist_idx_t *)page->freelist)[idx];    in get_free_obj()
    2473  unsigned int idx, freelist_idx_t val)    in set_free_obj() argument
      [all …]
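Two helpers recur in the SLAB matches: `index_to_obj()` turns an object index into an address with plain `base + size * idx` arithmetic, and `get_free_obj()`/`set_free_obj()` treat the page's freelist as a flat array of small object indices. A userspace sketch of both; the struct and type names are illustrative only:

```c
#include <stdint.h>
#include <stddef.h>

typedef uint8_t freelist_idx_t;     /* small index type, as in SLAB */

struct toy_slab {
    void *s_mem;                    /* start of the object area */
    size_t size;                    /* size of one object, padding included */
    freelist_idx_t *freelist;       /* array of free object indices */
};

/* address of object idx: cf. page->s_mem + cache->size * idx */
static void *index_to_obj(const struct toy_slab *slab, unsigned int idx)
{
    return (char *)slab->s_mem + slab->size * idx;
}

/* read/write slot idx of the freelist index array */
static freelist_idx_t get_free_obj(const struct toy_slab *slab, unsigned int idx)
{
    return slab->freelist[idx];
}

static void set_free_obj(struct toy_slab *slab, unsigned int idx,
                         freelist_idx_t val)
{
    slab->freelist[idx] = val;
}
```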
D | memcontrol.c
     866  enum mem_cgroup_stat_index idx)    in mem_cgroup_read_stat() argument
     873  val += per_cpu(memcg->stat->count[idx], cpu);    in mem_cgroup_read_stat()
     876  val += memcg->nocpu_base.count[idx];    in mem_cgroup_read_stat()
     884  enum mem_cgroup_events_index idx)    in mem_cgroup_read_events() argument
     891  val += per_cpu(memcg->stat->events[idx], cpu);    in mem_cgroup_read_events()
     894  val += memcg->nocpu_base.events[idx];    in mem_cgroup_read_events()
    1291  void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)    in __mem_cgroup_count_vm_event() argument
    1300  switch (idx) {    in __mem_cgroup_count_vm_event()
    2282  enum mem_cgroup_stat_index idx, int val)    in mem_cgroup_update_page_stat() argument
    2287  this_cpu_add(memcg->stat->count[idx], val);    in mem_cgroup_update_page_stat()
      [all …]
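`mem_cgroup_read_stat()` and `mem_cgroup_read_events()` both sum a per-CPU counter array at slot `idx` and then fold in a base value accumulated from CPUs that have gone offline. A sketch of that aggregation with a plain two-dimensional array standing in for the per-CPU allocation:

```c
#include <stdint.h>

#define NR_CPUS  8
#define NR_STATS 4

struct stat_block {
    /* counters[cpu][idx]: stand-in for per-CPU stat->count[idx] */
    int64_t counters[NR_CPUS][NR_STATS];
    /* residue folded in when a CPU went offline */
    int64_t offline_base[NR_STATS];
};

/* sum one statistic across all CPUs, plus the offline residue */
static int64_t read_stat(const struct stat_block *s, int idx)
{
    int64_t val = 0;

    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        val += s->counters[cpu][idx];
    val += s->offline_base[idx];
    return val;
}
```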
D | zsmalloc.c
     438  int idx = 0;    in get_size_class_index() local
     441  idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,    in get_size_class_index()
     444  return min(zs_size_classes - 1, idx);    in get_size_class_index()
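`get_size_class_index()` maps an allocation size to a size-class slot: sizes up to the minimum share class 0, larger sizes round up in fixed-width steps, and the result is clamped to the last class. A sketch with hypothetical values for the constants (zsmalloc derives the real step width from its own configuration):

```c
#include <stddef.h>

/* hypothetical values, for illustration only */
#define ZS_MIN_ALLOC_SIZE    32
#define ZS_SIZE_CLASS_DELTA  16
#define ZS_SIZE_CLASSES      255

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int get_size_class_index(size_t size)
{
    int idx = 0;

    /* sizes above the minimum are rounded up in DELTA-sized steps */
    if (size > ZS_MIN_ALLOC_SIZE)
        idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA);

    /* never step past the last configured class */
    return idx < ZS_SIZE_CLASSES - 1 ? idx : ZS_SIZE_CLASSES - 1;
}
```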
D | page_alloc.c
    5669  enum zone_type j, idx;    in setup_per_zone_lowmem_reserve() local
    5678  idx = j;    in setup_per_zone_lowmem_reserve()
    5679  while (idx) {    in setup_per_zone_lowmem_reserve()
    5682  idx--;    in setup_per_zone_lowmem_reserve()
    5684  if (sysctl_lowmem_reserve_ratio[idx] < 1)    in setup_per_zone_lowmem_reserve()
    5685  sysctl_lowmem_reserve_ratio[idx] = 1;    in setup_per_zone_lowmem_reserve()
    5687  lower_zone = pgdat->node_zones + idx;    in setup_per_zone_lowmem_reserve()
    5689  sysctl_lowmem_reserve_ratio[idx];    in setup_per_zone_lowmem_reserve()
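`setup_per_zone_lowmem_reserve()` walks from each zone `j` down through the lower zones, clamps `sysctl_lowmem_reserve_ratio[idx]` to at least 1 so the division is safe, and sets each lower zone's reserve from the higher zones' page count divided by that ratio. A simplified sketch with zones reduced to plain arrays; accumulating the managed pages on the way down reflects my reading of the surrounding code, which the snippet above does not show:

```c
#define MAX_NR_ZONES 4

/* hypothetical per-node state: pages managed by each zone, and the reserve
 * each lower zone keeps against allocations aimed at zone j */
static unsigned long zone_managed_pages[MAX_NR_ZONES];
static unsigned long lowmem_reserve[MAX_NR_ZONES][MAX_NR_ZONES];
static long lowmem_reserve_ratio[MAX_NR_ZONES] = { 256, 32, 32, 1 };

static void setup_lowmem_reserve(void)
{
    for (int j = 0; j < MAX_NR_ZONES; j++) {
        unsigned long managed = zone_managed_pages[j];

        lowmem_reserve[j][j] = 0;       /* a zone reserves nothing from itself */
        for (int idx = j; idx > 0; ) {
            idx--;
            /* a ratio below 1 would make the division meaningless; clamp it */
            if (lowmem_reserve_ratio[idx] < 1)
                lowmem_reserve_ratio[idx] = 1;
            lowmem_reserve[idx][j] = managed / lowmem_reserve_ratio[idx];
            managed += zone_managed_pages[idx];
        }
    }
}
```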
D | mempolicy.c
    2225  mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)    in mpol_shared_policy_lookup() argument
    2233  sn = sp_lookup(sp, idx, idx+1);    in mpol_shared_policy_lookup()
D | slub.c
    1415  int idx;    in new_slab() local
    1436  for_each_object_idx(p, idx, s, start, page->objects) {    in new_slab()
    1438  if (likely(idx < page->objects))    in new_slab()