Searched refs: page_to_nid (Results 1 – 22 of 22) sorted by relevance
/mm/
page_ext.c
    182  base = NODE_DATA(page_to_nid(page))->node_page_ext;  in lookup_page_ext()
    191  index = pfn - round_down(node_start_pfn(page_to_nid(page)),  in lookup_page_ext()

list_lru.c
    117  int nid = page_to_nid(virt_to_page(item));  in list_lru_add()
    141  int nid = page_to_nid(virt_to_page(item));  in list_lru_del()

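The list_lru.c hits show the variant where only a kernel virtual address is at hand: the object is first mapped back to its struct page, then to a node. A minimal sketch of that idiom (the helper name below is hypothetical; list_lru_add()/list_lru_del() simply open-code the same expression):

    #include <linux/mm.h>	/* virt_to_page(), page_to_nid() */

    /* Hypothetical helper: NUMA node backing a directly-mapped kernel object. */
    static int node_of_kernel_object(void *obj)
    {
    	return page_to_nid(virt_to_page(obj));
    }
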
hugetlb.c
    1072  int nid = page_to_nid(page);  in enqueue_huge_page()
    1276  if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))  in free_gigantic_page()
    1350  h->nr_huge_pages_node[page_to_nid(page)]--;  in update_and_free_page()
    1440  int nid = page_to_nid(page);  in __free_huge_page()
    1718  prep_new_huge_page(h, page, page_to_nid(page));  in alloc_fresh_huge_page()
    1815  int nid = page_to_nid(head);  in dissolve_free_huge_page()
    1920  h->surplus_huge_pages_node[page_to_nid(page)]++;  in alloc_surplus_huge_page()
    2499  prep_new_huge_page(h, page, page_to_nid(page));  in gather_bootmem_prealloc()
    2609  h->free_huge_pages_node[page_to_nid(page)]--;  in try_to_free_low()
    5757  int old_nid = page_to_nid(oldpage);  in move_hugetlb_state()
    [all …]

slub.c
    1252  struct kmem_cache_node *n = get_node(s, page_to_nid(page));  in free_debug_processing()
    1868  inc_slabs_node(s, page_to_nid(page), page->objects);  in allocate_slab()
    1923  dec_slabs_node(s, page_to_nid(page), page->objects);  in discard_slab()
    2212  struct kmem_cache_node *n = get_node(s, page_to_nid(page));  in deactivate_slab()
    2377  n2 = get_node(s, page_to_nid(page));  in unfreeze_partials()
    2551  if (node != NUMA_NO_NODE && page_to_nid(page) != node)  in node_match()
    3072  n = get_node(s, page_to_nid(page));  in __slab_free()
    3604  if (page_to_nid(page) != node) {  in early_kmem_cache_node_alloc()
    4765  node_set(page_to_nid(virt_to_page(track)), l->nodes);  in add_location()
    4796  node_set(page_to_nid(virt_to_page(track)), l->nodes);  in add_location()
    [all …]

sparse.c
    45  int page_to_nid(const struct page *page)  in page_to_nid()  (function definition)
    49  EXPORT_SYMBOL(page_to_nid);

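For context, the two sparse.c hits above are the out-of-line definition of page_to_nid() used when the node id does not fit into page->flags (the NODE_NOT_IN_PAGE_FLAGS configuration). A rough sketch of that path, paraphrased from mm/sparse.c rather than quoted verbatim (the table element type actually depends on MAX_NUMNODES):

    /* Sketch of mm/sparse.c: with no room for the node id in page->flags,
     * the node is looked up via the page's memory section instead. */
    #ifdef NODE_NOT_IN_PAGE_FLAGS
    static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;

    int page_to_nid(const struct page *page)
    {
    	return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);
    #endif
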
huge_memory.c
    490   struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));  in get_deferred_split_queue()
    500   struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));  in get_deferred_split_queue()
    1444  page_nid = page_to_nid(page);  in do_huge_pmd_numa_page()
    2665  struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));  in split_huge_page_to_list()
    2831  memcg_set_shrinker_bit(memcg, page_to_nid(page),  in deferred_split_huge_page()

mempolicy.c
    456   int nid = page_to_nid(page);  in queue_pages_required()
    936   err = page_to_nid(p);  in lookup_node()
    2152  if (page && page_to_nid(page) == nid) {  in alloc_page_interleave()
    2478  int curnid = page_to_nid(page);  in mpol_misplaced()

memremap.c
    149  nid = page_to_nid(first_page);  in pageunmap_range()

slab.c
    559   page_node = page_to_nid(page);  in cache_free_pfmemalloc()
    796   int page_node = page_to_nid(virt_to_page(objp));  in cache_free_alien()
    2596  page_node = page_to_nid(page);  in cache_grow_begin()
    2651  n = get_node(cachep, page_to_nid(page));  in cache_grow_end()
    3141  nid = page_to_nid(page);  in fallback_alloc()

migrate.c
    1541  nid = page_to_nid(page);  in alloc_migration_target()
    1636  if (page_to_nid(page) == node)  in add_page_for_migration()
    1816  err = page ? page_to_nid(page) : -ENOENT;  in do_pages_stat_array()

slob.c
    326  if (node != NUMA_NO_NODE && page_to_nid(sp) != node)  in slob_alloc()

mprotect.c
    112  if (target_node == page_to_nid(page))  in change_pte_range()

khugepaged.c
    1329  node = page_to_nid(page);  in khugepaged_scan_pmd()
    2067  node = page_to_nid(page);  in khugepaged_scan_file()

memory-failure.c
    288  drop_slab_node(page_to_nid(p));  in shake_page()

vmscan.c
    2206  nid = page_to_nid(page);  in reclaim_pages()
    2210  if (nid == page_to_nid(page)) {  in reclaim_pages()

memory_hotplug.c
    1392  mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));  in do_migrate_range()

memcontrol.c
    587   int nid = page_to_nid(page);  in mem_cgroup_page_nodeinfo()
    601   int nid = page_to_nid(page);  in soft_limit_tree_from_page()
    2942  page_to_nid(page));  in memcg_alloc_page_obj_cgroups()

ksm.c
    1967  page_to_nid(tree_page) != nid) {  in unstable_tree_search_insert()

vmalloc.c
    3490  counters[page_to_nid(v->pages[nr])]++;  in show_numa_info()

swapfile.c
    3863  int nid = page_to_nid(page);  in __cgroup_throttle_swaprate()

memory.c
    4618  page_nid = page_to_nid(page);  in do_numa_page()

page_alloc.c
    2504  VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);  in move_freepages()

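Taken together, most hits above follow the same idiom: resolve a page's node once with page_to_nid() and use it to index per-node state (hstate counters, kmem_cache_node pointers, per-node LRU lists). A minimal illustration of that pattern, using hypothetical struct and function names rather than any of the callers listed above:

    #include <linux/mm.h>	/* struct page, page_to_nid() */
    #include <linux/numa.h>	/* MAX_NUMNODES */

    /* Hypothetical per-node counters, for illustration only. */
    struct demo_node_stats {
    	unsigned long nr_pages_node[MAX_NUMNODES];
    };

    static void demo_account_page(struct demo_node_stats *stats, struct page *page)
    {
    	int nid = page_to_nid(page);	/* NUMA node that backs this page */

    	stats->nr_pages_node[nid]++;	/* per-node bookkeeping keyed by nid */
    }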