
Searched refs:nid (Results 1 – 25 of 26) sorted by relevance


/mm/
memory_hotplug.c
287 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
317 err = sparse_add_section(nid, pfn, pfns, altmap); in __add_pages()
329 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
337 if (unlikely(pfn_to_nid(start_pfn) != nid)) in find_smallest_section_pfn()
350 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
362 if (unlikely(pfn_to_nid(pfn) != nid)) in find_biggest_section_pfn()
381 int nid = zone_to_nid(zone); in shrink_zone_span() local
391 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
404 pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn, in shrink_zone_span()
647 int nid = zone_to_nid(zone); in node_states_check_changes_online() local
[all …]
sparse.c
53 static void set_section_nid(unsigned long section_nr, int nid) in set_section_nid() argument
55 section_to_node_table[section_nr] = nid; in set_section_nid()
58 static inline void set_section_nid(unsigned long section_nr, int nid) in set_section_nid() argument
64 static noinline struct mem_section __ref *sparse_index_alloc(int nid) in sparse_index_alloc() argument
71 section = kzalloc_node(array_size, GFP_KERNEL, nid); in sparse_index_alloc()
74 nid); in sparse_index_alloc()
77 __func__, array_size, nid); in sparse_index_alloc()
83 static int __meminit sparse_index_init(unsigned long section_nr, int nid) in sparse_index_init() argument
98 section = sparse_index_alloc(nid); in sparse_index_init()
107 static inline int sparse_index_init(unsigned long section_nr, int nid) in sparse_index_init() argument
[all …]
page_ext.c
135 static int __init alloc_node_page_ext(int nid) in alloc_node_page_ext() argument
141 nr_pages = NODE_DATA(nid)->node_spanned_pages; in alloc_node_page_ext()
150 if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) || in alloc_node_page_ext()
151 !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES)) in alloc_node_page_ext()
158 MEMBLOCK_ALLOC_ACCESSIBLE, nid); in alloc_node_page_ext()
161 NODE_DATA(nid)->node_page_ext = base; in alloc_node_page_ext()
169 int nid, fail; in page_ext_init_flatmem() local
174 for_each_online_node(nid) { in page_ext_init_flatmem()
175 fail = alloc_node_page_ext(nid); in page_ext_init_flatmem()
205 static void *__meminit alloc_page_ext(size_t size, int nid) in alloc_page_ext() argument
[all …]
memblock.c
193 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_bottom_up() argument
199 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { in __memblock_find_range_bottom_up()
228 phys_addr_t size, phys_addr_t align, int nid, in __memblock_find_range_top_down() argument
234 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, in __memblock_find_range_top_down()
267 phys_addr_t end, int nid, in memblock_find_in_range_node() argument
281 nid, flags); in memblock_find_in_range_node()
284 nid, flags); in memblock_find_in_range_node()
522 int nid, in memblock_insert_region() argument
532 memblock_set_region_node(rgn, nid); in memblock_insert_region()
555 int nid, enum memblock_flags flags) in memblock_add_range() argument
[all …]
hugetlb.c
890 int nid = page_to_nid(page); in enqueue_huge_page() local
891 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
893 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
897 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact() argument
901 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node_exact()
908 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node_exact()
914 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
918 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
927 zonelist = node_zonelist(nid, gfp_mask); in dequeue_huge_page_nodemask()
972 int nid; in dequeue_huge_page_vma() local
[all …]
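The hugetlb.c hits above show the recurring per-node bookkeeping idiom in mm/: page_to_nid() resolves the node that backs a page, and that id then indexes per-node state such as h->hugepage_freelists[nid]. Below is a minimal sketch of the same pattern, using a hypothetical counter array rather than the real struct hstate fields.

#include <linux/mm.h>
#include <linux/numa.h>

/* Hypothetical per-node counter (sketch only, not a real mm/ structure). */
static unsigned long pages_seen_on_node[MAX_NUMNODES];

static void account_page_node(struct page *page)
{
	int nid = page_to_nid(page);	/* node that backs this page */

	pages_seen_on_node[nid]++;	/* per-node state is indexed by nid */
}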
page_alloc.c
405 int nid = early_pfn_to_nid(pfn); in early_page_uninitialised() local
407 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) in early_page_uninitialised()
418 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) in defer_init() argument
432 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) in defer_init()
442 NODE_DATA(nid)->first_deferred_pfn = pfn; in defer_init()
455 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) in defer_init() argument
1359 unsigned long zone, int nid) in __init_single_page() argument
1362 set_page_links(page, zone, nid, pfn); in __init_single_page()
1380 int nid, zid; in init_reserved_page() local
1385 nid = early_pfn_to_nid(pfn); in init_reserved_page()
[all …]
list_lru.c
127 int nid = page_to_nid(virt_to_page(item)); in list_lru_add() local
128 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_add()
138 memcg_set_shrinker_bit(memcg, nid, in list_lru_add()
151 int nid = page_to_nid(virt_to_page(item)); in list_lru_del() local
152 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_del()
185 int nid, struct mem_cgroup *memcg) in list_lru_count_one() argument
187 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_count_one()
200 unsigned long list_lru_count_node(struct list_lru *lru, int nid) in list_lru_count_node() argument
204 nlru = &lru->node[nid]; in list_lru_count_node()
268 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, in list_lru_walk_one() argument
[all …]
ksm.c
179 int nid; member
200 int nid; /* when node of unstable tree */ member
371 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); in stable_node_dup_del()
600 chain->nid = NUMA_NO_NODE; /* debug */ in alloc_stable_node_chain()
812 root_unstable_tree + NUMA(rmap_item->nid)); in remove_rmap_item_from_tree()
942 int nid; in remove_all_stable_nodes() local
945 for (nid = 0; nid < ksm_nr_node_ids; nid++) { in remove_all_stable_nodes()
946 while (root_stable_tree[nid].rb_node) { in remove_all_stable_nodes()
947 stable_node = rb_entry(root_stable_tree[nid].rb_node, in remove_all_stable_nodes()
950 root_stable_tree + nid)) { in remove_all_stable_nodes()
[all …]
memremap.c
108 int nid; in memunmap_pages() local
119 nid = page_to_nid(first_page); in memunmap_pages()
126 arch_remove_memory(nid, res->start, resource_size(res), in memunmap_pages()
157 void *memremap_pages(struct dev_pagemap *pgmap, int nid) in memremap_pages() argument
252 if (nid < 0) in memremap_pages()
253 nid = numa_mem_id(); in memremap_pages()
274 error = add_pages(nid, PHYS_PFN(res->start), in memremap_pages()
283 error = arch_add_memory(nid, res->start, resource_size(res), in memremap_pages()
290 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in memremap_pages()
303 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in memremap_pages()
vmscan.c
474 int nid = shrinkctl->nid; in do_shrink_slab() local
480 nid = 0; in do_shrink_slab()
491 nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0); in do_shrink_slab()
587 &shrinker->nr_deferred[nid]); in do_shrink_slab()
589 new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); in do_shrink_slab()
591 trace_mm_shrink_slab_end(shrinker, nid, freed, nr, new_nr, total_scan); in do_shrink_slab()
596 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
609 map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, in shrink_slab_memcg()
617 .nid = nid, in shrink_slab_memcg()
657 memcg_set_shrinker_bit(memcg, nid, i); in shrink_slab_memcg()
[all …]
compaction.c
2407 static void compact_node(int nid) in compact_node() argument
2409 pg_data_t *pgdat = NODE_DATA(nid); in compact_node()
2439 int nid; in compact_nodes() local
2444 for_each_online_node(nid) in compact_nodes()
2445 compact_node(nid); in compact_nodes()
2469 int nid = dev->id; in sysfs_compact_node() local
2471 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { in sysfs_compact_node()
2475 compact_node(nid); in sysfs_compact_node()
2660 int kcompactd_run(int nid) in kcompactd_run() argument
2662 pg_data_t *pgdat = NODE_DATA(nid); in kcompactd_run()
[all …]
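compact_nodes() above is the canonical walk over online NUMA nodes: for_each_online_node() yields each online node id, and NODE_DATA(nid) maps it to that node's pg_data_t. A hedged sketch of the same loop shape follows (the function name is invented for illustration).

#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/printk.h>

/* Illustrative only: visit every online node's pg_data_t. */
static void visit_online_nodes(void)
{
	int nid;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		pr_info("node %d spans %lu pages\n",
			nid, pgdat->node_spanned_pages);
	}
}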
mempolicy.c
425 int nid = page_to_nid(page); in queue_pages_required() local
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); in queue_pages_required()
1889 int nid; in offset_il_node() local
1894 nid = first_node(pol->v.nodes); in offset_il_node()
1896 nid = next_node(nid, pol->v.nodes); in offset_il_node()
1897 return nid; in offset_il_node()
1941 int nid; in huge_node() local
1947 nid = interleave_nid(*mpol, vma, addr, in huge_node()
1950 nid = policy_node(gfp_flags, *mpol, numa_node_id()); in huge_node()
1954 return nid; in huge_node()
[all …]
mm_init.c
28 int nid; in mminit_verify_zonelist() local
33 for_each_online_node(nid) { in mminit_verify_zonelist()
34 pg_data_t *pgdat = NODE_DATA(nid); in mminit_verify_zonelist()
53 listid > 0 ? "thisnode" : "general", nid, in mminit_verify_zonelist()
memcontrol.c
335 int nid; in memcg_expand_one_shrinker_map() local
339 for_each_node(nid) { in memcg_expand_one_shrinker_map()
341 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); in memcg_expand_one_shrinker_map()
354 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); in memcg_expand_one_shrinker_map()
365 int nid; in memcg_free_shrinker_maps() local
370 for_each_node(nid) { in memcg_free_shrinker_maps()
371 pn = mem_cgroup_nodeinfo(memcg, nid); in memcg_free_shrinker_maps()
382 int nid, size, ret = 0; in memcg_alloc_shrinker_maps() local
389 for_each_node(nid) { in memcg_alloc_shrinker_maps()
396 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); in memcg_alloc_shrinker_maps()
[all …]
workingset.c
200 int memcgid, nid; in unpack_shadow() local
205 nid = entry & ((1UL << NODES_SHIFT) - 1); in unpack_shadow()
211 *pgdat = NODE_DATA(nid); in unpack_shadow()
429 lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg); in count_shadow_nodes()
437 pages = node_present_pages(sc->nid); in count_shadow_nodes()
mmzone.c
20 int nid = next_online_node(pgdat->node_id); in next_online_pgdat() local
22 if (nid == MAX_NUMNODES) in next_online_pgdat()
24 return NODE_DATA(nid); in next_online_pgdat()
khugepaged.c
710 static bool khugepaged_scan_abort(int nid) in khugepaged_scan_abort() argument
722 if (khugepaged_node_load[nid]) in khugepaged_scan_abort()
728 if (node_distance(nid, i) > node_reclaim_distance) in khugepaged_scan_abort()
744 int nid, target_node = 0, max_value = 0; in khugepaged_find_target_node() local
747 for (nid = 0; nid < MAX_NUMNODES; nid++) in khugepaged_find_target_node()
748 if (khugepaged_node_load[nid] > max_value) { in khugepaged_find_target_node()
749 max_value = khugepaged_node_load[nid]; in khugepaged_find_target_node()
750 target_node = nid; in khugepaged_find_target_node()
755 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; in khugepaged_find_target_node()
756 nid++) in khugepaged_find_target_node()
[all …]
oom_kill.c
256 int nid; in constrained_alloc() local
287 for_each_node_mask(nid, *oc->nodemask) in constrained_alloc()
288 oc->totalpages += node_present_pages(nid); in constrained_alloc()
300 for_each_node_mask(nid, cpuset_current_mems_allowed) in constrained_alloc()
301 oc->totalpages += node_present_pages(nid); in constrained_alloc()
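constrained_alloc() above sums node_present_pages() over a nodemask via for_each_node_mask(). A minimal sketch of that accumulation, with a hypothetical helper name:

#include <linux/mmzone.h>
#include <linux/nodemask.h>

/* Sketch: total pages physically present on the nodes set in @mask. */
static unsigned long present_pages_in_mask(const nodemask_t *mask)
{
	unsigned long total = 0;
	int nid;

	for_each_node_mask(nid, *mask)
		total += node_present_pages(nid);

	return total;
}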
slab.c
1123 int nid; in slab_memory_callback() local
1125 nid = mnb->status_change_nid; in slab_memory_callback()
1126 if (nid < 0) in slab_memory_callback()
1132 ret = init_cache_node_node(nid); in slab_memory_callback()
1137 ret = drain_cache_node_node(nid); in slab_memory_callback()
1260 int nid; in kmem_cache_init() local
1262 for_each_online_node(nid) { in kmem_cache_init()
1263 init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); in kmem_cache_init()
1266 &init_kmem_cache_node[SIZE_NODE + nid], nid); in kmem_cache_init()
3111 int nid; in fallback_alloc() local
[all …]
swapfile.c
673 int nid; in __del_from_avail_list() local
676 for_each_node(nid) in __del_from_avail_list()
677 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]); in __del_from_avail_list()
706 int nid; in add_to_avail_list() local
709 for_each_node(nid) { in add_to_avail_list()
710 WARN_ON(!plist_node_empty(&p->avail_lists[nid])); in add_to_avail_list()
711 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
2587 int nid; in SYSCALL_DEFINE1() local
2592 for_each_node(nid) { in SYSCALL_DEFINE1()
2593 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
[all …]
sparse-vmemmap.c
249 unsigned long nr_pages, int nid, struct vmem_altmap *altmap) in __populate_section_memmap() argument
266 if (vmemmap_populate(start, end, nid, altmap)) in __populate_section_memmap()
gup.c
1414 int nid = page_to_nid(page); in new_non_cma_page() local
1434 return alloc_migrate_huge_page(h, gfp_mask, nid, NULL); in new_non_cma_page()
1449 thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER); in new_non_cma_page()
1456 return __alloc_pages_node(nid, gfp_mask, 0); in new_non_cma_page()
memory-failure.c
1620 int nid = page_to_nid(p); in new_page() local
1622 return new_page_nodemask(p, nid, &node_states[N_MEMORY]); in new_page()
slub.c
2449 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2461 nid, gfpflags, &gfpflags); in slab_out_of_memory()
4186 int nid = marg->status_change_nid_normal; in slab_mem_going_online_callback() local
4193 if (nid < 0) in slab_mem_going_online_callback()
4214 s->node[nid] = n; in slab_mem_going_online_callback()
migrate.c
1916 int nid = (int) data; in alloc_misplaced_dst_page() local
1919 newpage = __alloc_pages_node(nid, in alloc_misplaced_dst_page()
