/mm/

memory_hotplug.c
  309  int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,    [in __add_pages(), argument]
  344  err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);    [in __add_pages()]
  354  static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,    [in find_smallest_section_pfn(), argument]
  362  if (unlikely(pfn_to_nid(start_pfn) != nid))    [in find_smallest_section_pfn()]
  375  static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,    [in find_biggest_section_pfn(), argument]
  387  if (unlikely(pfn_to_nid(pfn) != nid))    [in find_biggest_section_pfn()]
  403  int nid = zone_to_nid(zone);    [in shrink_zone_span(), local]
  413  pfn = find_smallest_section_pfn(nid, zone, end_pfn,    [in shrink_zone_span()]
  429  pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,    [in shrink_zone_span()]
  631  int nid = zone_to_nid(zone);    [in node_states_check_changes_online(), local]
  [all …]

sparse.c
  51  static void set_section_nid(unsigned long section_nr, int nid)    [in set_section_nid(), argument]
  53  section_to_node_table[section_nr] = nid;    [in set_section_nid()]
  56  static inline void set_section_nid(unsigned long section_nr, int nid)    [in set_section_nid(), argument]
  62  static noinline struct mem_section __ref *sparse_index_alloc(int nid)    [in sparse_index_alloc(), argument]
  69  section = kzalloc_node(array_size, GFP_KERNEL, nid);    [in sparse_index_alloc()]
  72  nid);    [in sparse_index_alloc()]
  75  __func__, array_size, nid);    [in sparse_index_alloc()]
  81  static int __meminit sparse_index_init(unsigned long section_nr, int nid)    [in sparse_index_init(), argument]
  96  section = sparse_index_alloc(nid);    [in sparse_index_init()]
  105  static inline int sparse_index_init(unsigned long section_nr, int nid)    [in sparse_index_init(), argument]
  [all …]

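The sparse.c hits revolve around one pattern: building per-node metadata with kzalloc_node() so it lives on the node it describes. A minimal sketch of node-local allocation follows; the helper name alloc_node_local_table() is invented for illustration and is not part of sparse.c.

    #include <linux/slab.h>
    #include <linux/gfp.h>

    /*
     * Hypothetical helper (not from sparse.c): allocate a zeroed table on a
     * preferred NUMA node.  kzalloc_node() tries node 'nid' first and falls
     * back to other nodes when that node has no free memory; passing
     * NUMA_NO_NODE lets the allocator pick any node.
     */
    static void *alloc_node_local_table(size_t size, int nid)
    {
            return kzalloc_node(size, GFP_KERNEL, nid);
    }
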
page_ext.c
  197  static int __init alloc_node_page_ext(int nid)    [in alloc_node_page_ext(), argument]
  203  nr_pages = NODE_DATA(nid)->node_spanned_pages;    [in alloc_node_page_ext()]
  212  if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||    [in alloc_node_page_ext()]
  213  !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))    [in alloc_node_page_ext()]
  220  MEMBLOCK_ALLOC_ACCESSIBLE, nid);    [in alloc_node_page_ext()]
  223  NODE_DATA(nid)->node_page_ext = base;    [in alloc_node_page_ext()]
  231  int nid, fail;    [in page_ext_init_flatmem(), local]
  236  for_each_online_node(nid) {    [in page_ext_init_flatmem()]
  237  fail = alloc_node_page_ext(nid);    [in page_ext_init_flatmem()]
  275  static void *__meminit alloc_page_ext(size_t size, int nid)    [in alloc_page_ext(), argument]
  [all …]

memblock.c
  212  phys_addr_t size, phys_addr_t align, int nid,    [in __memblock_find_range_bottom_up(), argument]
  218  for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {    [in __memblock_find_range_bottom_up()]
  247  phys_addr_t size, phys_addr_t align, int nid,    [in __memblock_find_range_top_down(), argument]
  253  for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,    [in __memblock_find_range_top_down()]
  286  phys_addr_t end, int nid,    [in memblock_find_in_range_node(), argument]
  300  nid, flags);    [in memblock_find_in_range_node()]
  303  nid, flags);    [in memblock_find_in_range_node()]
  543  int nid,    [in memblock_insert_region(), argument]
  553  memblock_set_region_node(rgn, nid);    [in memblock_insert_region()]
  576  int nid, enum memblock_flags flags)    [in memblock_add_range(), argument]
  [all …]

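The memblock.c hits drive the free-range iterators with a node id to restrict the search to one node's memory. Below is a hypothetical early-boot helper showing how for_each_free_mem_range() is parameterised by nid; it is a sketch of the iterator's use, not code from memblock.c.

    #include <linux/init.h>
    #include <linux/memblock.h>

    /*
     * Hypothetical (illustration only): return the lowest free physical
     * address that memblock still has available on node 'nid'.
     */
    static phys_addr_t __init lowest_free_addr_on_node(int nid)
    {
            phys_addr_t start, end;
            u64 i;

            /* Free ranges are visited bottom-up; the first hit is the lowest. */
            for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL)
                    return start;

            return 0;       /* node has no free memblock memory */
    }
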
hugetlb.c
  1072  int nid = page_to_nid(page);    [in enqueue_huge_page(), local]
  1073  list_move(&page->lru, &h->hugepage_freelists[nid]);    [in enqueue_huge_page()]
  1075  h->free_huge_pages_node[nid]++;    [in enqueue_huge_page()]
  1079  static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)    [in dequeue_huge_page_node_exact(), argument]
  1084  list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {    [in dequeue_huge_page_node_exact()]
  1095  h->free_huge_pages_node[nid]--;    [in dequeue_huge_page_node_exact()]
  1102  static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,    [in dequeue_huge_page_nodemask(), argument]
  1111  zonelist = node_zonelist(nid, gfp_mask);    [in dequeue_huge_page_nodemask()]
  1147  int nid;    [in dequeue_huge_page_vma(), local]
  1163  nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);    [in dequeue_huge_page_vma()]
  [all …]

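hugetlb.c keys both its free lists and its counters by the page's own node id. The sketch below shows that page_to_nid()-indexed bookkeeping; the struct and helper are made up for illustration and only mirror the enqueue_huge_page() shape above.

    #include <linux/mm.h>
    #include <linux/list.h>
    #include <linux/numa.h>

    /* Hypothetical per-node free-list bookkeeping (not hugetlb's real state). */
    struct node_freelists {
            struct list_head list[MAX_NUMNODES];
            unsigned long    nr_free[MAX_NUMNODES];
    };

    static void enqueue_on_home_node(struct node_freelists *f, struct page *page)
    {
            int nid = page_to_nid(page);    /* node the page physically sits on */

            list_move(&page->lru, &f->list[nid]);
            f->nr_free[nid]++;
    }
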
list_lru.c
  117  int nid = page_to_nid(virt_to_page(item));    [in list_lru_add(), local]
  118  struct list_lru_node *nlru = &lru->node[nid];    [in list_lru_add()]
  128  memcg_set_shrinker_bit(memcg, nid,    [in list_lru_add()]
  141  int nid = page_to_nid(virt_to_page(item));    [in list_lru_del(), local]
  142  struct list_lru_node *nlru = &lru->node[nid];    [in list_lru_del()]
  175  int nid, struct mem_cgroup *memcg)    [in list_lru_count_one(), argument]
  177  struct list_lru_node *nlru = &lru->node[nid];    [in list_lru_count_one()]
  190  unsigned long list_lru_count_node(struct list_lru *lru, int nid)    [in list_lru_count_node(), argument]
  194  nlru = &lru->node[nid];    [in list_lru_count_node()]
  258  list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,    [in list_lru_walk_one(), argument]
  [all …]

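list_lru.c derives the node id from the object's address with page_to_nid(virt_to_page(item)) so the item lands on the LRU of its own node. A hypothetical stand-alone helper showing the same trick (valid only for directly mapped memory such as slab objects, not for vmalloc addresses):

    #include <linux/mm.h>

    /* Hypothetical (not in list_lru.c): which NUMA node holds this object? */
    static int object_home_node(const void *item)
    {
            return page_to_nid(virt_to_page(item));
    }
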
page_alloc.c
  422  int nid = early_pfn_to_nid(pfn);    [in early_page_uninitialised(), local]
  424  if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)    [in early_page_uninitialised()]
  435  defer_init(int nid, unsigned long pfn, unsigned long end_pfn)    [in defer_init(), argument]
  449  if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))    [in defer_init()]
  452  if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)    [in defer_init()]
  461  NODE_DATA(nid)->first_deferred_pfn = pfn;    [in defer_init()]
  479  static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)    [in defer_init(), argument]
  1532  unsigned long zone, int nid)    [in __init_single_page(), argument]
  1535  set_page_links(page, zone, nid, pfn);    [in __init_single_page()]
  1553  int nid, zid;    [in init_reserved_page(), local]
  [all …]

ksm.c
  179  int nid;    [member]
  200  int nid; /* when node of unstable tree */    [member]
  371  rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));    [in stable_node_dup_del()]
  601  chain->nid = NUMA_NO_NODE; /* debug */    [in alloc_stable_node_chain()]
  813  root_unstable_tree + NUMA(rmap_item->nid));    [in remove_rmap_item_from_tree()]
  943  int nid;    [in remove_all_stable_nodes(), local]
  946  for (nid = 0; nid < ksm_nr_node_ids; nid++) {    [in remove_all_stable_nodes()]
  947  while (root_stable_tree[nid].rb_node) {    [in remove_all_stable_nodes()]
  948  stable_node = rb_entry(root_stable_tree[nid].rb_node,    [in remove_all_stable_nodes()]
  951  root_stable_tree + nid)) {    [in remove_all_stable_nodes()]
  [all …]

memremap.c
  143  int nid;    [in pageunmap_range(), local]
  149  nid = page_to_nid(first_page);    [in pageunmap_range()]
  158  arch_remove_memory(nid, range->start, range_len(range),    [in pageunmap_range()]
  201  int range_id, int nid)    [in pagemap_range(), argument]
  240  if (nid < 0)    [in pagemap_range()]
  241  nid = numa_mem_id();    [in pagemap_range()]
  262  error = add_pages(nid, PHYS_PFN(range->start),    [in pagemap_range()]
  271  error = arch_add_memory(nid, range->start, range_len(range),    [in pagemap_range()]
  278  zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];    [in pagemap_range()]
  292  memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],    [in pagemap_range()]
  [all …]

vmscan.c
  450  int nid = shrinkctl->nid;    [in do_shrink_slab(), local]
  456  nid = 0;    [in do_shrink_slab()]
  467  nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);    [in do_shrink_slab()]
  563  &shrinker->nr_deferred[nid]);    [in do_shrink_slab()]
  565  new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);    [in do_shrink_slab()]
  567  trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan);    [in do_shrink_slab()]
  572  static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,    [in shrink_slab_memcg(), argument]
  585  map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,    [in shrink_slab_memcg()]
  593  .nid = nid,    [in shrink_slab_memcg()]
  633  memcg_set_shrinker_bit(memcg, nid, i);    [in shrink_slab_memcg()]
  [all …]

compaction.c
  2611  static void compact_node(int nid)    [in compact_node(), argument]
  2613  pg_data_t *pgdat = NODE_DATA(nid);    [in compact_node()]
  2643  int nid;    [in compact_nodes(), local]
  2648  for_each_online_node(nid)    [in compact_nodes()]
  2649  compact_node(nid);    [in compact_nodes()]
  2665  int rc, nid;    [in compaction_proactiveness_sysctl_handler(), local]
  2672  for_each_online_node(nid) {    [in compaction_proactiveness_sysctl_handler()]
  2673  pg_data_t *pgdat = NODE_DATA(nid);    [in compaction_proactiveness_sysctl_handler()]
  2704  int nid = dev->id;    [in sysfs_compact_node(), local]
  2706  if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {    [in sysfs_compact_node()]
  [all …]

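compaction.c repeatedly walks every online node and resolves each node id to its pg_data_t, as in compact_nodes() above. A minimal, made-up example of that for_each_online_node()/NODE_DATA() loop:

    #include <linux/nodemask.h>
    #include <linux/mmzone.h>
    #include <linux/printk.h>

    /* Hypothetical walk over all online NUMA nodes (illustration only). */
    static void report_online_nodes(void)
    {
            int nid;

            for_each_online_node(nid) {
                    pg_data_t *pgdat = NODE_DATA(nid);

                    pr_info("node %d spans %lu pages\n",
                            nid, pgdat->node_spanned_pages);
            }
    }
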
mempolicy.c
  456  int nid = page_to_nid(page);    [in queue_pages_required(), local]
  459  return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);    [in queue_pages_required()]
  1084  .nid = dest,    [in migrate_to_node()]
  1980  int nid;    [in offset_il_node(), local]
  1985  nid = first_node(pol->v.nodes);    [in offset_il_node()]
  1987  nid = next_node(nid, pol->v.nodes);    [in offset_il_node()]
  1988  return nid;    [in offset_il_node()]
  2032  int nid;    [in huge_node(), local]
  2038  nid = interleave_nid(*mpol, vma, addr,    [in huge_node()]
  2041  nid = policy_node(gfp_flags, *mpol, numa_node_id());    [in huge_node()]
  [all …]

mm_init.c
  29  int nid;    [in mminit_verify_zonelist(), local]
  34  for_each_online_node(nid) {    [in mminit_verify_zonelist()]
  35  pg_data_t *pgdat = NODE_DATA(nid);    [in mminit_verify_zonelist()]
  54  listid > 0 ? "thisnode" : "general", nid,    [in mminit_verify_zonelist()]

workingset.c
  201  int memcgid, nid;    [in unpack_shadow(), local]
  206  nid = entry & ((1UL << NODES_SHIFT) - 1);    [in unpack_shadow()]
  212  *pgdat = NODE_DATA(nid);    [in unpack_shadow()]
  494  lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));    [in count_shadow_nodes()]
  504  pages = node_present_pages(sc->nid);    [in count_shadow_nodes()]

mmzone.c
  21  int nid = next_online_node(pgdat->node_id);    [in next_online_pgdat(), local]
  23  if (nid == MAX_NUMNODES)    [in next_online_pgdat()]
  25  return NODE_DATA(nid);    [in next_online_pgdat()]

memcontrol.c
  411  int nid;    [in memcg_expand_one_shrinker_map(), local]
  415  for_each_node(nid) {    [in memcg_expand_one_shrinker_map()]
  417  mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);    [in memcg_expand_one_shrinker_map()]
  422  new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);    [in memcg_expand_one_shrinker_map()]
  430  rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);    [in memcg_expand_one_shrinker_map()]
  441  int nid;    [in memcg_free_shrinker_maps(), local]
  446  for_each_node(nid) {    [in memcg_free_shrinker_maps()]
  447  pn = mem_cgroup_nodeinfo(memcg, nid);    [in memcg_free_shrinker_maps()]
  458  int nid, size, ret = 0;    [in memcg_alloc_shrinker_maps(), local]
  465  for_each_node(nid) {    [in memcg_alloc_shrinker_maps()]
  [all …]

khugepaged.c
  808  static bool khugepaged_scan_abort(int nid)    [in khugepaged_scan_abort(), argument]
  820  if (khugepaged_node_load[nid])    [in khugepaged_scan_abort()]
  826  if (node_distance(nid, i) > node_reclaim_distance)    [in khugepaged_scan_abort()]
  842  int nid, target_node = 0, max_value = 0;    [in khugepaged_find_target_node(), local]
  845  for (nid = 0; nid < MAX_NUMNODES; nid++)    [in khugepaged_find_target_node()]
  846  if (khugepaged_node_load[nid] > max_value) {    [in khugepaged_find_target_node()]
  847  max_value = khugepaged_node_load[nid];    [in khugepaged_find_target_node()]
  848  target_node = nid;    [in khugepaged_find_target_node()]
  853  for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;    [in khugepaged_find_target_node()]
  854  nid++)    [in khugepaged_find_target_node()]
  [all …]

swapfile.c
  669  int nid;    [in __del_from_avail_list(), local]
  672  for_each_node(nid)    [in __del_from_avail_list()]
  673  plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);    [in __del_from_avail_list()]
  708  int nid;    [in add_to_avail_list(), local]
  716  for_each_node(nid) {    [in add_to_avail_list()]
  717  WARN_ON(!plist_node_empty(&p->avail_lists[nid]));    [in add_to_avail_list()]
  718  plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);    [in add_to_avail_list()]
  2649  int nid;    [in SYSCALL_DEFINE1(), local]
  2654  for_each_node(nid) {    [in SYSCALL_DEFINE1()]
  2655  if (si->avail_lists[nid].prio != 1)    [in SYSCALL_DEFINE1()]
  [all …]

cma.c
  249  int nid)    [in cma_declare_contiguous_nid(), argument]
  345  highmem_start, limit, nid, true);    [in cma_declare_contiguous_nid()]
  361  limit, nid, true);    [in cma_declare_contiguous_nid()]
  368  limit, nid, true);    [in cma_declare_contiguous_nid()]

slab.c
  1134  int nid;    [in slab_memory_callback(), local]
  1136  nid = mnb->status_change_nid;    [in slab_memory_callback()]
  1137  if (nid < 0)    [in slab_memory_callback()]
  1143  ret = init_cache_node_node(nid);    [in slab_memory_callback()]
  1148  ret = drain_cache_node_node(nid);    [in slab_memory_callback()]
  1271  int nid;    [in kmem_cache_init(), local]
  1273  for_each_online_node(nid) {    [in kmem_cache_init()]
  1274  init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);    [in kmem_cache_init()]
  1277  &init_kmem_cache_node[SIZE_NODE + nid], nid);    [in kmem_cache_init()]
  3103  int nid;    [in fallback_alloc(), local]
  [all …]

oom_kill.c
  262  int nid;    [in constrained_alloc(), local]
  293  for_each_node_mask(nid, *oc->nodemask)    [in constrained_alloc()]
  294  oc->totalpages += node_present_pages(nid);    [in constrained_alloc()]
  306  for_each_node_mask(nid, cpuset_current_mems_allowed)    [in constrained_alloc()]
  307  oc->totalpages += node_present_pages(nid);    [in constrained_alloc()]

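constrained_alloc() sizes the OOM domain by summing node_present_pages() over the nodes of a nodemask. A hypothetical helper with the same shape (not oom_kill.c code):

    #include <linux/nodemask.h>
    #include <linux/mmzone.h>

    /* Hypothetical: how much memory do the nodes allowed by 'mask' hold? */
    static unsigned long pages_in_nodemask(const nodemask_t *mask)
    {
            unsigned long total = 0;
            int nid;

            for_each_node_mask(nid, *mask)
                    total += node_present_pages(nid);

            return total;
    }
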
migrate.c
  1534  int nid;    [in alloc_migration_target(), local]
  1539  nid = mtc->nid;    [in alloc_migration_target()]
  1540  if (nid == NUMA_NO_NODE)    [in alloc_migration_target()]
  1541  nid = page_to_nid(page);    [in alloc_migration_target()]
  1547  return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);    [in alloc_migration_target()]
  1563  new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);    [in alloc_migration_target()]
  1589  .nid = node,    [in do_move_pages_to_node()]
  2002  int nid = (int) data;    [in alloc_misplaced_dst_page(), local]
  2005  newpage = __alloc_pages_node(nid,    [in alloc_misplaced_dst_page()]

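alloc_migration_target() treats NUMA_NO_NODE as "no preference" and falls back to the node the page currently lives on before allocating. A sketch of that fallback; the helper name alloc_near() is invented and the call is reduced to a plain order-0 allocation:

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/numa.h>

    /* Hypothetical allocation helper (illustration of the nid fallback only). */
    static struct page *alloc_near(struct page *old, int nid, gfp_t gfp_mask)
    {
            if (nid == NUMA_NO_NODE)
                    nid = page_to_nid(old);         /* stay on the page's node */

            return __alloc_pages_node(nid, gfp_mask, 0);    /* order-0 page */
    }
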
sparse-vmemmap.c
  252  unsigned long nr_pages, int nid, struct vmem_altmap *altmap)    [in __populate_section_memmap(), argument]
  261  if (vmemmap_populate(start, end, nid, altmap))    [in __populate_section_memmap()]

internal.h
  661  int nid; /* preferred node id */    [member]

slub.c
  2586  slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)    [in slab_out_of_memory(), argument]
  2598  nid, gfpflags, &gfpflags);    [in slab_out_of_memory()]
  4329  int nid = marg->status_change_nid_normal;    [in slab_mem_going_online_callback(), local]
  4336  if (nid < 0)    [in slab_mem_going_online_callback()]
  4357  s->node[nid] = n;    [in slab_mem_going_online_callback()]