References to nid (the NUMA node id) under /mm/. Each entry names a file, then its matching lines: line number, source text, and the enclosing function; lists cut off by the indexer end in [all …].
sparse.c
    51  static void set_section_nid(unsigned long section_nr, int nid)    in set_section_nid() argument
    53  section_to_node_table[section_nr] = nid;    in set_section_nid()
    56  static inline void set_section_nid(unsigned long section_nr, int nid)    in set_section_nid() argument
    62  static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)    in sparse_index_alloc() argument
    69  if (node_state(nid, N_HIGH_MEMORY))    in sparse_index_alloc()
    70  section = kzalloc_node(array_size, GFP_KERNEL, nid);    in sparse_index_alloc()
    74  section = memblock_virt_alloc_node(array_size, nid);    in sparse_index_alloc()
    80  static int __meminit sparse_index_init(unsigned long section_nr, int nid)    in sparse_index_init() argument
    88  section = sparse_index_alloc(nid);    in sparse_index_init()
    97  static inline int sparse_index_init(unsigned long section_nr, int nid)    in sparse_index_init() argument
    [all …]
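The sparse_index_alloc() lines show a common pattern for node-local allocation: once the slab allocator is up, use kzalloc_node() on the target node (or plain kzalloc() for a memoryless node); before that, fall back to the early memblock allocator. A minimal sketch of the pattern, with a hypothetical helper name alloc_on_node():

    #include <linux/slab.h>
    #include <linux/nodemask.h>
    #include <linux/bootmem.h>

    /* Hypothetical helper: return 'size' zeroed bytes placed near node 'nid'. */
    static void * __meminit alloc_on_node(size_t size, int nid)
    {
            if (slab_is_available()) {
                    /* Prefer the target node; a memoryless node cannot
                     * satisfy a node-bound slab request. */
                    if (node_state(nid, N_HIGH_MEMORY))
                            return kzalloc_node(size, GFP_KERNEL, nid);
                    return kzalloc(size, GFP_KERNEL);
            }
            /* Early boot: slab not ready yet, use the memblock allocator. */
            return memblock_virt_alloc_node(size, nid);
    }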
page_cgroup.c
    45  static int __init alloc_node_page_cgroup(int nid)    in alloc_node_page_cgroup() argument
    51  nr_pages = NODE_DATA(nid)->node_spanned_pages;    in alloc_node_page_cgroup()
    59  BOOTMEM_ALLOC_ACCESSIBLE, nid);    in alloc_node_page_cgroup()
    62  NODE_DATA(nid)->node_page_cgroup = base;    in alloc_node_page_cgroup()
    70  int nid, fail;    in page_cgroup_init_flatmem() local
    75  for_each_online_node(nid) {    in page_cgroup_init_flatmem()
    76  fail = alloc_node_page_cgroup(nid);    in page_cgroup_init_flatmem()
   109  static void *__meminit alloc_page_cgroup(size_t size, int nid)    in alloc_page_cgroup() argument
   114  addr = alloc_pages_exact_nid(nid, size, flags);    in alloc_page_cgroup()
   120  if (node_state(nid, N_HIGH_MEMORY))    in alloc_page_cgroup()
    [all …]
memblock.c
   118  phys_addr_t size, phys_addr_t align, int nid)    in __memblock_find_range_bottom_up() argument
   123  for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {    in __memblock_find_range_bottom_up()
   150  phys_addr_t size, phys_addr_t align, int nid)    in __memblock_find_range_top_down() argument
   155  for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {    in __memblock_find_range_top_down()
   193  phys_addr_t end, int nid)    in memblock_find_in_range_node() argument
   218  size, align, nid);    in memblock_find_in_range_node()
   236  return __memblock_find_range_top_down(start, end, size, align, nid);    in memblock_find_in_range_node()
   468  int nid, unsigned long flags)    in memblock_insert_region() argument
   477  memblock_set_region_node(rgn, nid);    in memblock_insert_region()
   500  int nid, unsigned long flags)    in memblock_add_range() argument
    [all …]
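The bottom-up finder scans free memblock ranges from low to high addresses and returns the first aligned candidate that fits; the top-down variant mirrors it with for_each_free_mem_range_reverse(). A sketch of the bottom-up side, renamed, assuming the 3.x-era iterator signature shown above:

    static phys_addr_t __init find_free_bottom_up(phys_addr_t start, phys_addr_t end,
                                                  phys_addr_t size, phys_addr_t align,
                                                  int nid)
    {
            phys_addr_t this_start, this_end, cand;
            u64 i;

            for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
                    /* Clip each free range to the caller's [start, end) window. */
                    this_start = clamp(this_start, start, end);
                    this_end = clamp(this_end, start, end);

                    cand = round_up(this_start, align);
                    if (cand < this_end && this_end - cand >= size)
                            return cand;    /* lowest suitable address */
            }
            return 0;    /* nothing found */
    }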
memory_hotplug.c
   329  int nid = zone->zone_pgdat->node_id;    in fix_zone_id() local
   333  set_page_links(pfn_to_page(pfn), zid, nid, pfn);    in fix_zone_id()
   447  int nid = pgdat->node_id;    in __add_zone() local
   462  memmap_init_zone(nr_pages, nid, zone_type,    in __add_zone()
   467  static int __meminit __add_section(int nid, struct zone *zone,    in __add_section() argument
   485  return register_new_memory(nid, __pfn_to_section(phys_start_pfn));    in __add_section()
   494  int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,    in __add_pages() argument
   505  err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);    in __add_pages()
   523  static int find_smallest_section_pfn(int nid, struct zone *zone,    in find_smallest_section_pfn() argument
   535  if (unlikely(pfn_to_nid(start_pfn) != nid))    in find_smallest_section_pfn()
    [all …]
hugetlb.c
   517  int nid = page_to_nid(page);    in enqueue_huge_page() local
   518  list_move(&page->lru, &h->hugepage_freelists[nid]);    in enqueue_huge_page()
   520  h->free_huge_pages_node[nid]++;    in enqueue_huge_page()
   523  static struct page *dequeue_huge_page_node(struct hstate *h, int nid)    in dequeue_huge_page_node() argument
   527  list_for_each_entry(page, &h->hugepage_freelists[nid], lru)    in dequeue_huge_page_node()
   534  if (&h->hugepage_freelists[nid] == &page->lru)    in dequeue_huge_page_node()
   539  h->free_huge_pages_node[nid]--;    in dequeue_huge_page_node()
   616  static int next_node_allowed(int nid, nodemask_t *nodes_allowed)    in next_node_allowed() argument
   618  nid = next_node(nid, *nodes_allowed);    in next_node_allowed()
   619  if (nid == MAX_NUMNODES)    in next_node_allowed()
    [all …]
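next_node_allowed() is hugetlb's round-robin step across a nodemask: advance with next_node() and wrap to first_node() when the mask runs out, so huge page allocation and freeing spread evenly over the allowed nodes. A reconstruction consistent with the fragment:

    static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
    {
            nid = next_node(nid, *nodes_allowed);
            if (nid == MAX_NUMNODES)
                    nid = first_node(*nodes_allowed);    /* wrap around */
            VM_BUG_ON(nid >= MAX_NUMNODES);
            return nid;
    }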
list_lru.c
    15  int nid = page_to_nid(virt_to_page(item));    in list_lru_add() local
    16  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_add()
    23  node_set(nid, lru->active_nodes);    in list_lru_add()
    34  int nid = page_to_nid(virt_to_page(item));    in list_lru_del() local
    35  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_del()
    41  node_clear(nid, lru->active_nodes);    in list_lru_del()
    52  list_lru_count_node(struct list_lru *lru, int nid)    in list_lru_count_node() argument
    55  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_count_node()
    67  list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,    in list_lru_walk_node() argument
    71  struct list_lru_node *nlru = &lru->node[nid];    in list_lru_walk_node()
    [all …]
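Note that list_lru derives the node from the item's own backing page (page_to_nid(virt_to_page(item))), so callers never pass a nid when adding or removing; a node is only named when counting or walking. A hedged usage sketch (my_lru and my_obj are illustrative, not kernel API):

    #include <linux/list_lru.h>

    struct my_obj {
            struct list_head lru;    /* links the object into a per-node list */
            /* ... payload ... */
    };

    static struct list_lru my_lru;    /* set up once with list_lru_init() */

    static void my_obj_make_reclaimable(struct my_obj *obj)
    {
            /* Node is inferred from obj's backing page; no nid argument. */
            list_lru_add(&my_lru, &obj->lru);
    }

    static unsigned long my_objs_on_node(int nid)
    {
            /* Per-node count, e.g. for a NUMA-aware shrinker's ->count_objects. */
            return list_lru_count_node(&my_lru, nid);
    }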
ksm.c
   145  int nid;    member
   166  int nid; /* when node of unstable tree */    member
   514  root_stable_tree + NUMA(stable_node->nid));    in remove_node_from_stable_tree()
   650  root_unstable_tree + NUMA(rmap_item->nid));    in remove_rmap_item_from_tree()
   745  int nid;    in remove_all_stable_nodes() local
   748  for (nid = 0; nid < ksm_nr_node_ids; nid++) {    in remove_all_stable_nodes()
   749  while (root_stable_tree[nid].rb_node) {    in remove_all_stable_nodes()
   750  stable_node = rb_entry(root_stable_tree[nid].rb_node,    in remove_all_stable_nodes()
  1154  int nid;    in stable_tree_search() local
  1168  nid = get_kpfn_nid(page_to_pfn(page));    in stable_tree_search()
    [all …]
page_alloc.c
  2963  struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)    in alloc_kmem_pages_node() argument
  2970  page = alloc_pages_node(nid, gfp_mask, order);    in alloc_kmem_pages_node()
  3044  void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)    in alloc_pages_exact_nid() argument
  3047  struct page *p = alloc_pages_node(nid, gfp_mask, order);    in alloc_pages_exact_nid()
  3144  void si_meminfo_node(struct sysinfo *val, int nid)    in si_meminfo_node() argument
  3148  pg_data_t *pgdat = NODE_DATA(nid);    in si_meminfo_node()
  3153  val->sharedram = node_page_state(nid, NR_SHMEM);    in si_meminfo_node()
  3154  val->freeram = node_page_state(nid, NR_FREE_PAGES);    in si_meminfo_node()
  3171  bool skip_free_areas_node(unsigned int flags, int nid)    in skip_free_areas_node() argument
  3181  ret = !node_isset(nid, cpuset_current_mems_allowed);    in skip_free_areas_node()
    [all …]
mm_init.c
    26  int nid;    in mminit_verify_zonelist() local
    31  for_each_online_node(nid) {    in mminit_verify_zonelist()
    32  pg_data_t *pgdat = NODE_DATA(nid);    in mminit_verify_zonelist()
    51  listid > 0 ? "thisnode" : "general", nid,    in mminit_verify_zonelist()
   134  unsigned long nid, unsigned long pfn)    in mminit_verify_page_links() argument
   136  BUG_ON(page_to_nid(page) != nid);    in mminit_verify_page_links()
workingset.c
   172  int zid, nid;    in unpack_shadow() local
   177  nid = entry & ((1UL << NODES_SHIFT) - 1);    in unpack_shadow()
   181  *zone = NODE_DATA(nid)->node_zones + zid;    in unpack_shadow()
   278  shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);    in count_shadow_nodes()
   281  pages = node_present_pages(sc->nid);    in count_shadow_nodes()
   379  ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,    in scan_shadow_nodes()
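unpack_shadow() recovers the zone from a shadow radix-tree entry by peeling fixed-width bit fields off an unsigned long; line 177 shows the node id occupying the low NODES_SHIFT bits at that point in the walk. A sketch of such a packing scheme, assuming the zone index sits below the node id (the kernel's real entry also carries an eviction counter and a radix-tree exception tag, both omitted here):

    static unsigned long pack_shadow_sketch(int nid, int zid, unsigned long counter)
    {
            unsigned long entry = counter;

            entry = (entry << NODES_SHIFT) | nid;
            entry = (entry << ZONES_SHIFT) | zid;
            return entry;
    }

    static void unpack_shadow_sketch(unsigned long entry, int *nid, int *zid)
    {
            *zid = entry & ((1UL << ZONES_SHIFT) - 1);
            entry >>= ZONES_SHIFT;
            *nid = entry & ((1UL << NODES_SHIFT) - 1);    /* cf. line 177 above */
    }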
mempolicy.c
   496  int nid;    in queue_pages_pte_range() local
   509  nid = page_to_nid(page);    in queue_pages_pte_range()
   510  if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))    in queue_pages_pte_range()
   527  int nid;    in queue_pages_hugetlb_pmd_range() local
   537  nid = page_to_nid(page);    in queue_pages_hugetlb_pmd_range()
   538  if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))    in queue_pages_hugetlb_pmd_range()
  1720  unsigned nid, next;    in interleave_nodes() local
  1723  nid = me->il_next;    in interleave_nodes()
  1724  next = next_node(nid, policy->v.nodes);    in interleave_nodes()
  1729  return nid;    in interleave_nodes()
    [all …]
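interleave_nodes() is the round-robin heart of MPOL_INTERLEAVE: hand back the remembered next node, then advance it, wrapping with first_node() when next_node() walks off the policy's nodemask. Reconstructed to match the fragment and the 3.x sources:

    static unsigned interleave_nodes(struct mempolicy *policy)
    {
            unsigned nid, next;
            struct task_struct *me = current;

            nid = me->il_next;
            next = next_node(nid, policy->v.nodes);
            if (next >= MAX_NUMNODES)
                    next = first_node(policy->v.nodes);    /* wrap */
            if (next < MAX_NUMNODES)
                    me->il_next = next;    /* remember where to continue */
            return nid;
    }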
vmscan.c
   285  int nid = shrinkctl->nid;    in shrink_slab_node() local
   298  nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);    in shrink_slab_node()
   378  &shrinker->nr_deferred[nid]);    in shrink_slab_node()
   380  new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);    in shrink_slab_node()
   382  trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);    in shrink_slab_node()
   428  shrinkctl->nid = 0;    in shrink_slab()
   434  for_each_node_mask(shrinkctl->nid, shrinkctl->nodes_to_scan) {    in shrink_slab()
   435  if (node_online(shrinkctl->nid))    in shrink_slab()
  2807  int nid;    in try_to_free_mem_cgroup_pages() local
  2824  nid = mem_cgroup_select_victim_node(memcg);    in try_to_free_mem_cgroup_pages()
    [all …]
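shrink_slab() walks every node in shrinkctl->nodes_to_scan and keeps per-node deferred work in shrinker->nr_deferred[nid]; only shrinkers that set SHRINKER_NUMA_AWARE get that per-node treatment. A hedged sketch of registering such a shrinker, reusing the illustrative my_lru from the list_lru sketch above:

    #include <linux/shrinker.h>
    #include <linux/list_lru.h>

    static struct list_lru my_lru;    /* illustrative cache of reclaimable objects */

    static enum lru_status my_isolate(struct list_head *item, spinlock_t *lock,
                                      void *cb_arg)
    {
            /* Free or rotate the object here; stub for the sketch. */
            return LRU_REMOVED;
    }

    static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
    {
            /* For SHRINKER_NUMA_AWARE shrinkers, sc->nid names the node. */
            return list_lru_count_node(&my_lru, sc->nid);
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            return list_lru_walk_node(&my_lru, sc->nid, my_isolate,
                                      NULL, &sc->nr_to_scan);
    }

    static struct shrinker my_shrinker = {
            .count_objects  = my_count,
            .scan_objects   = my_scan,
            .seeks          = DEFAULT_SEEKS,
            .flags          = SHRINKER_NUMA_AWARE,    /* per-node nr_deferred[] */
    };
    /* registered from init code with register_shrinker(&my_shrinker) */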
memcontrol.c
   672  int nid = zone_to_nid(zone);    in mem_cgroup_zone_zoneinfo() local
   675  return &memcg->nodeinfo[nid]->zoneinfo[zid];    in mem_cgroup_zone_zoneinfo()
   686  int nid = page_to_nid(page);    in mem_cgroup_page_zoneinfo() local
   689  return &memcg->nodeinfo[nid]->zoneinfo[zid];    in mem_cgroup_page_zoneinfo()
   693  soft_limit_tree_node_zone(int nid, int zid)    in soft_limit_tree_node_zone() argument
   695  return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];    in soft_limit_tree_node_zone()
   701  int nid = page_to_nid(page);    in soft_limit_tree_from_page() local
   704  return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];    in soft_limit_tree_from_page()
   798  int nid, zid;    in mem_cgroup_remove_from_trees() local
   800  for_each_node(nid) {    in mem_cgroup_remove_from_trees()
    [all …]
compaction.c
  1478  static void compact_node(int nid)    in compact_node() argument
  1486  __compact_pgdat(NODE_DATA(nid), &cc);    in compact_node()
  1492  int nid;    in compact_nodes() local
  1497  for_each_online_node(nid)    in compact_nodes()
  1498  compact_node(nid);    in compact_nodes()
  1527  int nid = dev->id;    in sysfs_compact_node() local
  1529  if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {    in sysfs_compact_node()
  1533  compact_node(nid);    in sysfs_compact_node()
mmzone.c
    19  int nid = next_online_node(pgdat->node_id);    in next_online_pgdat() local
    21  if (nid == MAX_NUMNODES)    in next_online_pgdat()
    23  return NODE_DATA(nid);    in next_online_pgdat()
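next_online_pgdat() is the stepper behind the for_each_online_pgdat() iterator. A typical consumer looks like this (report_online_nodes() and its pr_info() are only for illustration):

    #include <linux/mmzone.h>
    #include <linux/printk.h>

    static void report_online_nodes(void)
    {
            pg_data_t *pgdat;

            /* Visits the first online node's pgdat, then each next one. */
            for_each_online_pgdat(pgdat)
                    pr_info("node %d spans %lu pages\n",
                            pgdat->node_id, pgdat->node_spanned_pages);
    }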
oom_kill.c
   206  int nid;    in constrained_alloc() local
   228  for_each_node_mask(nid, *nodemask)    in constrained_alloc()
   229  *totalpages += node_spanned_pages(nid);    in constrained_alloc()
   241  for_each_node_mask(nid, cpuset_current_mems_allowed)    in constrained_alloc()
   242  *totalpages += node_spanned_pages(nid);    in constrained_alloc()
huge_memory.c
  2263  static bool khugepaged_scan_abort(int nid)    in khugepaged_scan_abort() argument
  2275  if (khugepaged_node_load[nid])    in khugepaged_scan_abort()
  2281  if (node_distance(nid, i) > RECLAIM_DISTANCE)    in khugepaged_scan_abort()
  2291  int nid, target_node = 0, max_value = 0;    in khugepaged_find_target_node() local
  2294  for (nid = 0; nid < MAX_NUMNODES; nid++)    in khugepaged_find_target_node()
  2295  if (khugepaged_node_load[nid] > max_value) {    in khugepaged_find_target_node()
  2296  max_value = khugepaged_node_load[nid];    in khugepaged_find_target_node()
  2297  target_node = nid;    in khugepaged_find_target_node()
  2302  for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;    in khugepaged_find_target_node()
  2303  nid++)    in khugepaged_find_target_node()
    [all …]
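khugepaged_scan_abort() stops a collapse scan once pages have been seen on a node farther than RECLAIM_DISTANCE from nid, so collapsing a huge page never pulls memory together across distant nodes; khugepaged_find_target_node() then picks the most-loaded node, with a round-robin tiebreak after last_khugepaged_target_node. The abort check reconstructs to roughly the following (the kernel of this era also returns false early when zone_reclaim_mode is off):

    static bool khugepaged_scan_abort(int nid)
    {
            int i;

            /* A node that already holds scanned pages is always acceptable. */
            if (khugepaged_node_load[nid])
                    return false;

            for (i = 0; i < MAX_NUMNODES; i++) {
                    if (!khugepaged_node_load[i])
                            continue;
                    if (node_distance(nid, i) > RECLAIM_DISTANCE)
                            return true;    /* too far apart: abort this scan */
            }
            return false;
    }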
slab.c
  1313  int nid;    in slab_memory_callback() local
  1315  nid = mnb->status_change_nid;    in slab_memory_callback()
  1316  if (nid < 0)    in slab_memory_callback()
  1322  ret = init_cache_node_node(nid);    in slab_memory_callback()
  1327  ret = drain_cache_node_node(nid);    in slab_memory_callback()
  1448  int nid;    in kmem_cache_init() local
  1450  for_each_online_node(nid) {    in kmem_cache_init()
  1451  init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);    in kmem_cache_init()
  1454  &init_kmem_cache_node[SIZE_NODE + nid], nid);    in kmem_cache_init()
  3002  int nid;    in fallback_alloc() local
    [all …]
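slab_memory_callback() is a memory-hotplug notifier: it keys on mnb->status_change_nid and builds or drains per-node cache structures as a node gains or loses memory. A hedged skeleton of that notifier shape (my_memory_callback is an illustrative name; the slab-specific work is elided):

    #include <linux/memory.h>
    #include <linux/notifier.h>

    static int my_memory_callback(struct notifier_block *self,
                                  unsigned long action, void *arg)
    {
            struct memory_notify *mnb = arg;
            int nid = mnb->status_change_nid;
            int ret = 0;

            if (nid < 0)    /* no node-level change: nothing to do */
                    goto out;

            switch (action) {
            case MEM_GOING_ONLINE:
                    /* allocate per-node structures for nid,
                     * cf. init_cache_node_node() above */
                    break;
            case MEM_GOING_OFFLINE:
                    /* drain per-node caches on nid,
                     * cf. drain_cache_node_node() above */
                    break;
            default:
                    break;
            }
    out:
            return notifier_from_errno(ret);
    }
    /* registered with hotplug_memory_notifier(my_memory_callback, priority) */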
sparse-vmemmap.c
   179  struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)    in sparse_mem_map_populate() argument
   189  if (vmemmap_populate(start, end, nid))    in sparse_mem_map_populate()
internal.h
   343  enum zone_type zone, unsigned long nid, unsigned long pfn);
   358  enum zone_type zone, unsigned long nid, unsigned long pfn)    in mminit_verify_page_links() argument
memory-failure.c
   247  int nid = page_to_nid(p);    in shake_page() local
   252  node_set(nid, shrink.nodes_to_scan);    in shake_page()
  1477  int nid = page_to_nid(p);    in new_page() local
  1480  nid);    in new_page()
  1482  return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);    in new_page()
nobootmem.c
    35  static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,    in __alloc_memory_core_early() argument
    44  addr = memblock_find_in_range_node(size, align, goal, limit, nid);    in __alloc_memory_core_early()
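__alloc_memory_core_early() pairs the range search with a reservation: find a free physical range near the node, claim it, and return its linear-map address. The core of the pattern as a renamed sketch (the real function also clamps the limit to memblock.current_limit and informs kmemleak):

    static void * __init early_alloc_on_node(int nid, u64 size, u64 align,
                                             u64 goal, u64 limit)
    {
            void *ptr;
            u64 addr;

            /* Find a free physical range, preferring node nid. */
            addr = memblock_find_in_range_node(size, align, goal, limit, nid);
            if (!addr)
                    return NULL;

            memblock_reserve(addr, size);    /* claim it before anyone else */
            ptr = phys_to_virt(addr);
            memset(ptr, 0, size);            /* early allocations come zeroed */
            return ptr;
    }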
migrate.c
  1583  int nid = (int) data;    in alloc_misplaced_dst_page() local
  1586  newpage = alloc_pages_exact_node(nid,    in alloc_misplaced_dst_page()
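alloc_misplaced_dst_page() is an allocation callback handed to migrate_pages(); the destination node travels in the opaque unsigned long argument. A hedged sketch with the 3.x new_page_t signature (new_node_page is an illustrative name, and the real callback uses a more careful gfp mask):

    #include <linux/migrate.h>
    #include <linux/gfp.h>

    static struct page *new_node_page(struct page *page, unsigned long private,
                                      int **resultp)
    {
            int nid = (int)private;    /* target node smuggled through 'private' */

            /* Order-0 page on the chosen node only. */
            return alloc_pages_exact_node(nid,
                            GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
    }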
slub.c
  2164  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)    in slab_out_of_memory() argument
  2176  nid, gfpflags);    in slab_out_of_memory()
  3542  int nid = marg->status_change_nid_normal;    in slab_mem_going_online_callback() local
  3549  if (nid < 0)    in slab_mem_going_online_callback()
  3570  s->node[nid] = n;    in slab_mem_going_online_callback()