
Searched refs:node (Results 1 – 25 of 29) sorted by relevance


/mm/
interval_tree.c
28 void vma_interval_tree_insert_after(struct vm_area_struct *node, in vma_interval_tree_insert_after() argument
34 unsigned long last = vma_last_pgoff(node); in vma_interval_tree_insert_after()
36 VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); in vma_interval_tree_insert_after()
55 node->shared.rb_subtree_last = last; in vma_interval_tree_insert_after()
56 rb_link_node(&node->shared.rb, &parent->shared.rb, link); in vma_interval_tree_insert_after()
57 rb_insert_augmented(&node->shared.rb, &root->rb_root, in vma_interval_tree_insert_after()
75 void anon_vma_interval_tree_insert(struct anon_vma_chain *node, in INTERVAL_TREE_DEFINE()
79 node->cached_vma_start = avc_start_pgoff(node); in INTERVAL_TREE_DEFINE()
80 node->cached_vma_last = avc_last_pgoff(node); in INTERVAL_TREE_DEFINE()
82 __anon_vma_interval_tree_insert(node, root); in INTERVAL_TREE_DEFINE()
[all …]
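
The interval_tree.c matches are the insertion side of the kernel's augmented-rbtree interval tree (generated by INTERVAL_TREE_DEFINE): each node caches the largest interval end seen anywhere in its subtree (rb_subtree_last). As a rough illustration of why that cached field is worth maintaining, here is a minimal userspace sketch of the lookup it enables. The names (itnode, overlap_first) are invented, the tree is a plain unbalanced BST built by hand, and the kernel's rebalancing and extra right-subtree pruning are omitted.

#include <stdio.h>

struct itnode {
    unsigned long start, last;      /* closed interval [start, last] */
    unsigned long subtree_last;     /* max 'last' anywhere in this subtree */
    struct itnode *left, *right;
};

/* Return the leftmost node whose interval overlaps [start, last], or NULL.
 * The subtree_last augmentation lets the walk skip left subtrees that end
 * before the query starts. */
static struct itnode *overlap_first(struct itnode *n,
                                    unsigned long start, unsigned long last)
{
    while (n) {
        if (n->left && n->left->subtree_last >= start)
            n = n->left;            /* leftmost overlap, if any, is on the left */
        else if (n->start <= last && start <= n->last)
            return n;               /* this node overlaps the query */
        else if (n->start > last)
            return NULL;            /* everything further right starts too late */
        else
            n = n->right;
    }
    return NULL;
}

int main(void)
{
    /* Hand-built tree keyed by start: a=[10,19], b=[25,40], c=[50,55]. */
    struct itnode a = { 10, 19, 19, NULL, NULL };
    struct itnode c = { 50, 55, 55, NULL, NULL };
    struct itnode b = { 25, 40, 55, &a, &c };   /* root */
    struct itnode *hit;

    hit = overlap_first(&b, 30, 60);
    printf("[30,60] -> %s\n", hit ? "overlap" : "no overlap");  /* overlaps b */
    hit = overlap_first(&b, 20, 24);
    printf("[20,24] -> %s\n", hit ? "overlap" : "no overlap");  /* falls in a gap */
    return 0;
}
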
sparse-vmemmap.c
39 static void * __ref __earlyonly_bootmem_alloc(int node, in __earlyonly_bootmem_alloc() argument
45 MEMBLOCK_ALLOC_ACCESSIBLE, node); in __earlyonly_bootmem_alloc()
48 void * __meminit vmemmap_alloc_block(unsigned long size, int node) in vmemmap_alloc_block() argument
57 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
68 return __earlyonly_bootmem_alloc(node, size, size, in vmemmap_alloc_block()
76 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, in vmemmap_alloc_block_buf() argument
86 ptr = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_buf()
132 void __meminit vmemmap_verify(pte_t *pte, int node, in vmemmap_verify() argument
138 if (node_distance(actual_node, node) > LOCAL_DISTANCE) in vmemmap_verify()
143 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, in vmemmap_pte_populate() argument
[all …]
slab.c
213 int node, struct list_head *list);
493 int node = __this_cpu_read(slab_reap_node); in next_reap_node() local
495 node = next_node_in(node, node_online_map); in next_reap_node()
496 __this_cpu_write(slab_reap_node, node); in next_reap_node()
533 static struct array_cache *alloc_arraycache(int node, int entries, in alloc_arraycache() argument
539 ac = kmalloc_node(memsize, gfp, node); in alloc_arraycache()
607 static inline struct alien_cache **alloc_alien_cache(int node, in alloc_alien_cache() argument
644 static struct alien_cache *__alloc_alien_cache(int node, int entries, in __alloc_alien_cache() argument
650 alc = kmalloc_node(memsize, gfp, node); in __alloc_alien_cache()
659 static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) in alloc_alien_cache() argument
[all …]
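
next_reap_node() in the slab.c hits simply advances a per-CPU cursor to the next online NUMA node, wrapping around the online map. A small userspace sketch of that wrap-around step, with an invented fixed-size online array standing in for node_online_map:

#include <stdio.h>

#define MAX_NODES 8

static int node_online[MAX_NODES] = { 1, 1, 0, 1, 0, 0, 1, 0 };

/* Return the next online node after 'node', wrapping around; loosely
 * mirrors next_node_in(node, node_online_map). */
static int next_node_in(int node)
{
    for (int i = 1; i <= MAX_NODES; i++) {
        int n = (node + i) % MAX_NODES;
        if (node_online[n])
            return n;
    }
    return node;        /* nothing else is online */
}

int main(void)
{
    int reap_node = 0;

    /* Advance the reap cursor a few times, as next_reap_node() would do
     * once per cache_reap() pass. */
    for (int i = 0; i < 6; i++) {
        reap_node = next_node_in(reap_node);
        printf("reap node -> %d\n", reap_node);
    }
    return 0;
}
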
workingset.c
433 void workingset_update_node(struct xa_node *node) in workingset_update_node() argument
445 if (node->count && node->count == node->nr_values) { in workingset_update_node()
446 if (list_empty(&node->private_list)) { in workingset_update_node()
447 list_lru_add(&shadow_nodes, &node->private_list); in workingset_update_node()
448 __inc_lruvec_slab_state(node, WORKINGSET_NODES); in workingset_update_node()
451 if (!list_empty(&node->private_list)) { in workingset_update_node()
452 list_lru_del(&shadow_nodes, &node->private_list); in workingset_update_node()
453 __dec_lruvec_slab_state(node, WORKINGSET_NODES); in workingset_update_node()
521 struct xa_node *node = container_of(item, struct xa_node, private_list); in shadow_lru_isolate() local
537 mapping = container_of(node->array, struct address_space, i_pages); in shadow_lru_isolate()
[all …]
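
workingset_update_node() keeps an xarray node on the shadow_nodes LRU only while every populated slot in it is a shadow entry (count == nr_values), and takes it back off as soon as a real page reappears. A hedged sketch of that decision, with a hand-rolled list standing in for list_lru_add()/list_lru_del() and invented names (xa_node_sketch, update_node):

#include <stdio.h>

/* Minimal circular doubly-linked list, in the style of the kernel's list.h. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }
static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next; new->prev = head;
    head->next->prev = new; head->next = new;
}
static void list_del_init(struct list_head *e)
{
    e->prev->next = e->next; e->next->prev = e->prev;
    list_init(e);
}

/* Stand-in for struct xa_node: a radix/xarray node that may hold a mix of
 * real entries and shadow (value) entries.  Field names mimic the snippet. */
struct xa_node_sketch {
    int count;                    /* slots in use                  */
    int nr_values;                /* slots holding shadow entries  */
    struct list_head private_list;
};

static struct list_head shadow_nodes;   /* tracked-nodes list (sketch)       */
static int nr_shadow_nodes;             /* WORKINGSET_NODES counter (sketch) */

/* Mirror of the decision in workingset_update_node(): track the node only
 * while *all* of its entries are shadow entries. */
static void update_node(struct xa_node_sketch *node)
{
    if (node->count && node->count == node->nr_values) {
        if (list_empty(&node->private_list)) {
            list_add(&node->private_list, &shadow_nodes);
            nr_shadow_nodes++;
        }
    } else {
        if (!list_empty(&node->private_list)) {
            list_del_init(&node->private_list);
            nr_shadow_nodes--;
        }
    }
}

int main(void)
{
    struct xa_node_sketch n = { .count = 0, .nr_values = 0 };

    list_init(&shadow_nodes);
    list_init(&n.private_list);

    n.count = 3; n.nr_values = 3; update_node(&n);    /* only shadows: tracked */
    printf("tracked nodes: %d\n", nr_shadow_nodes);   /* 1 */
    n.nr_values = 2; update_node(&n);                 /* a real page came back */
    printf("tracked nodes: %d\n", nr_shadow_nodes);   /* 0 */
    return 0;
}
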
list_lru.c
118 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_add()
142 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_del()
177 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_count_one()
194 nlru = &lru->node[nid]; in list_lru_count_node()
262 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_walk_one()
278 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_walk_one_irq()
299 struct list_lru_node *nlru = &lru->node[nid]; in list_lru_walk_node()
448 if (memcg_init_list_lru_node(&lru->node[i])) in memcg_init_list_lru()
454 if (!lru->node[i].memcg_lrus) in memcg_init_list_lru()
456 memcg_destroy_list_lru_node(&lru->node[i]); in memcg_init_list_lru()
[all …]
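
Nearly every function in list_lru.c starts the same way: index the per-NUMA-node part of the LRU with &lru->node[nid]. A very small sketch of that partitioning, with only a per-node item count standing in for the real spinlock-protected list (all names here are invented):

#include <stdio.h>

#define MAX_NODES 4

/* Per-NUMA-node part of an LRU, loosely modelled on struct list_lru_node. */
struct lru_node_sketch {
    long nr_items;
};

struct lru_sketch {
    struct lru_node_sketch node[MAX_NODES];   /* one slot per NUMA node */
};

/* Pick the per-node structure first, then operate on it, as the
 * list_lru.c hits above do. */
static void lru_add(struct lru_sketch *lru, int nid)
{
    struct lru_node_sketch *nlru = &lru->node[nid];
    nlru->nr_items++;
}

static long lru_count_node(struct lru_sketch *lru, int nid)
{
    return lru->node[nid].nr_items;
}

int main(void)
{
    static struct lru_sketch lru;

    lru_add(&lru, 0);
    lru_add(&lru, 0);
    lru_add(&lru, 2);
    for (int nid = 0; nid < MAX_NODES; nid++)
        printf("node %d: %ld items\n", nid, lru_count_node(&lru, nid));
    return 0;
}
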
slob.c
191 static void *slob_new_pages(gfp_t gfp, int order, int node) in slob_new_pages() argument
196 if (node != NUMA_NO_NODE) in slob_new_pages()
197 page = __alloc_pages_node(node, gfp, order); in slob_new_pages()
301 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node, in slob_alloc() argument
326 if (node != NUMA_NO_NODE && page_to_nid(sp) != node) in slob_alloc()
358 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node); in slob_alloc()
469 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) in __do_kmalloc_node() argument
495 m = slob_alloc(size + minalign, gfp, align, node, minalign); in __do_kmalloc_node()
503 size, size + minalign, gfp, node); in __do_kmalloc_node()
509 ret = slob_new_pages(gfp, order, node); in __do_kmalloc_node()
[all …]
slub.c
1110 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1112 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1122 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1124 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1137 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1139 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1525 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1529 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1531 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1668 gfp_t flags, int node, struct kmem_cache_order_objects oo) in alloc_slab_page() argument
[all …]
vmalloc.c
462 get_subtree_max_size(struct rb_node *node) in get_subtree_max_size() argument
466 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
862 struct rb_node *node; in find_vmap_lowest_match() local
866 node = free_vmap_area_root.rb_node; in find_vmap_lowest_match()
871 while (node) { in find_vmap_lowest_match()
872 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
874 if (get_subtree_max_size(node->rb_left) >= length && in find_vmap_lowest_match()
876 node = node->rb_left; in find_vmap_lowest_match()
886 if (get_subtree_max_size(node->rb_right) >= length) { in find_vmap_lowest_match()
887 node = node->rb_right; in find_vmap_lowest_match()
[all …]
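
find_vmap_lowest_match() walks the free-area rbtree using the subtree_max_size augmentation that get_subtree_max_size() reads: a subtree is only entered if it can possibly hold a block of the requested length. Below is a simplified userspace sketch of that descent over an unbalanced BST, with invented names (fnode, lowest_match) and none of the real function's alignment or vstart handling:

#include <stdio.h>

/* A free area keyed by its start address, augmented with the largest
 * free-block size found anywhere in its subtree. */
struct fnode {
    unsigned long start, size;
    unsigned long subtree_max;
    struct fnode *left, *right;
};

static unsigned long submax(struct fnode *n)
{
    return n ? n->subtree_max : 0;    /* what get_subtree_max_size() returns */
}

/* Find the lowest-address free block with size >= length, or NULL. */
static struct fnode *lowest_match(struct fnode *n, unsigned long length)
{
    while (n) {
        if (submax(n->left) >= length) {
            n = n->left;              /* a fit exists further left */
        } else if (n->size >= length) {
            return n;                 /* this node is the leftmost fit */
        } else {
            if (submax(n->right) < length)
                return NULL;          /* nothing below is big enough */
            n = n->right;
        }
    }
    return NULL;
}

int main(void)
{
    /* Free areas: x=[0x1000,+0x100], y=[0x3000,+0x800], z=[0x9000,+0x200]. */
    struct fnode x = { 0x1000, 0x100, 0x100, NULL, NULL };
    struct fnode z = { 0x9000, 0x200, 0x200, NULL, NULL };
    struct fnode y = { 0x3000, 0x800, 0x800, &x, &z };   /* root */
    struct fnode *va;

    va = lowest_match(&y, 0x180);
    printf("0x180 bytes -> %#lx\n", va ? va->start : 0);    /* 0x3000 */
    va = lowest_match(&y, 0x1000);
    printf("0x1000 bytes -> %#lx\n", va ? va->start : 0);   /* no fit */
    return 0;
}
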
hugetlb.c
1109 int node = NUMA_NO_NODE; in dequeue_huge_page_nodemask() local
1124 if (zone_to_nid(zone) == node) in dequeue_huge_page_nodemask()
1126 node = zone_to_nid(zone); in dequeue_huge_page_nodemask()
1128 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
1236 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \ argument
1239 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1242 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \ argument
1245 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1294 int node; in alloc_gigantic_page() local
1305 for_each_node_mask(node, *nodemask) { in alloc_gigantic_page()
[all …]
zswap.c
137 struct hlist_node node; member
286 struct rb_node *node = root->rb_node; in zswap_rb_search() local
289 while (node) { in zswap_rb_search()
290 entry = rb_entry(node, struct zswap_entry, rbnode); in zswap_rb_search()
292 node = node->rb_left; in zswap_rb_search()
294 node = node->rb_right; in zswap_rb_search()
415 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node) in zswap_cpu_comp_prepare() argument
417 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); in zswap_cpu_comp_prepare()
433 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node) in zswap_cpu_comp_dead() argument
435 struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node); in zswap_cpu_comp_dead()
[all …]
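
zswap_rb_search() is a textbook rbtree lookup keyed by swap offset. The same descent on a plain (unbalanced) BST, as a compilable sketch with invented names; it shows the walk only, not the kernel's rb_entry()/rbtree machinery:

#include <stdio.h>

/* Stand-in for struct zswap_entry: keyed by the swap offset. */
struct entry {
    unsigned long offset;
    struct entry *left, *right;
};

/* Mirrors the while (node) loop in zswap_rb_search(): go left when the
 * stored offset is larger than the key, right when it is smaller. */
static struct entry *rb_search(struct entry *root, unsigned long offset)
{
    struct entry *node = root;

    while (node) {
        if (node->offset > offset)
            node = node->left;
        else if (node->offset < offset)
            node = node->right;
        else
            return node;
    }
    return NULL;
}

int main(void)
{
    struct entry a = { 10, NULL, NULL };
    struct entry c = { 70, NULL, NULL };
    struct entry b = { 40, &a, &c };    /* root */

    printf("offset 70 %sfound\n", rb_search(&b, 70) ? "" : "not ");
    printf("offset 55 %sfound\n", rb_search(&b, 55) ? "" : "not ");
    return 0;
}
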
ksm.c
157 struct rb_node node; /* when node of stable tree */ member
207 struct rb_node node; /* when node of unstable tree */ member
371 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); in stable_node_dup_del()
610 rb_replace_node(&dup->node, &chain->node, root); in alloc_stable_node_chain()
627 rb_erase(&chain->node, root); in free_stable_node_chain()
812 rb_erase(&rmap_item->node, in remove_rmap_item_from_tree()
949 struct stable_node, node); in remove_all_stable_nodes()
1430 rb_replace_node(&stable_node->node, &found->node, in stable_node_dup()
1580 stable_node = rb_entry(*new, struct stable_node, node); in stable_tree_search()
1705 rb_link_node(&page_node->node, parent, new); in stable_tree_search()
[all …]
mempolicy.c
136 int numa_map_to_online_node(int node) in numa_map_to_online_node() argument
140 if (node == NUMA_NO_NODE || node_online(node)) in numa_map_to_online_node()
141 return node; in numa_map_to_online_node()
143 min_node = node; in numa_map_to_online_node()
145 dist = node_distance(node, n); in numa_map_to_online_node()
159 int node; in get_task_policy() local
164 node = numa_node_id(); in get_task_policy()
165 if (node != NUMA_NO_NODE) { in get_task_policy()
166 pol = &preferred_node_policy[node]; in get_task_policy()
350 int node = first_node(pol->w.user_nodemask); in mpol_rebind_preferred() local
[all …]
vmpressure.c
157 struct list_head node; member
168 list_for_each_entry(ev, &vmpr->events, node) { in vmpressure_event()
416 list_add(&ev->node, &vmpr->events); in vmpressure_register_event()
442 list_for_each_entry(ev, &vmpr->events, node) { in vmpressure_unregister_event()
445 list_del(&ev->node); in vmpressure_unregister_event()
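
The vmpressure.c matches show the usual intrusive-list pattern: a struct list_head member named node is linked into vmpr->events, and list_for_each_entry() recovers the enclosing vmpressure_event from it via container_of(). A userspace sketch of that trick with a minimal singly-linked link; the names here (link, event, register_event) are invented, not the kernel's list API:

#include <stdio.h>
#include <stddef.h>

/* Kernel-style container_of: recover the outer structure from a pointer
 * to one of its members, using offsetof. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct link { struct link *next; };

/* Stand-in for struct vmpressure_event: the 'node' member is what sits on
 * the events list; the rest of the struct travels along for free. */
struct event {
    int level;
    struct link node;
};

static struct link *events;     /* head of the registered-event list */

static void register_event(struct event *ev)
{
    ev->node.next = events;
    events = &ev->node;
}

int main(void)
{
    struct event a = { .level = 1 }, b = { .level = 2 };

    register_event(&a);
    register_event(&b);

    /* Walk the embedded links and recover each struct event, the way
     * list_for_each_entry(ev, &vmpr->events, node) does. */
    for (struct link *l = events; l; l = l->next) {
        struct event *ev = container_of(l, struct event, node);
        printf("event level %d\n", ev->level);
    }
    return 0;
}
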
khugepaged.c
882 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page() argument
886 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); in khugepaged_alloc_page()
958 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page() argument
1066 int node, int referenced, int unmapped) in collapse_huge_page() argument
1091 new_page = khugepaged_alloc_page(hpage, gfp, node); in collapse_huge_page()
1247 int node = NUMA_NO_NODE, unmapped = 0; in khugepaged_scan_pmd() local
1329 node = page_to_nid(page); in khugepaged_scan_pmd()
1330 if (khugepaged_scan_abort(node)) { in khugepaged_scan_pmd()
1334 khugepaged_node_load[node]++; in khugepaged_scan_pmd()
1385 node = khugepaged_find_target_node(); in khugepaged_scan_pmd()
[all …]
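
khugepaged_scan_pmd() counts, per node, how many of the scanned pages live there (khugepaged_node_load[node]++), and khugepaged_find_target_node() then collapses the huge page on the busiest node. A tiny sketch of that bookkeeping, with an invented fixed node count:

#include <stdio.h>

#define MAX_NODES 4

static int node_load[MAX_NODES];    /* khugepaged_node_load analogue */

/* Pick the node that backed most of the scanned PTEs; loosely what
 * khugepaged_find_target_node() does with its load array. */
static int find_target_node(void)
{
    int target = 0;

    for (int nid = 1; nid < MAX_NODES; nid++)
        if (node_load[nid] > node_load[target])
            target = nid;
    return target;
}

int main(void)
{
    /* Pretend a PMD scan saw pages on these nodes. */
    int page_nodes[] = { 0, 1, 1, 1, 3, 1, 0 };

    for (size_t i = 0; i < sizeof(page_nodes) / sizeof(page_nodes[0]); i++)
        node_load[page_nodes[i]]++;

    printf("collapse target node: %d\n", find_target_node());   /* node 1 */
    return 0;
}
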
memory_hotplug.c
234 int node = pgdat->node_id; in register_page_bootmem_info_node() local
241 get_page_bootmem(node, page, NODE_INFO); in register_page_bootmem_info_node()
254 if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node)) in register_page_bootmem_info_node()
647 static void node_states_set_node(int node, struct memory_notify *arg) in node_states_set_node() argument
650 node_set_state(node, N_NORMAL_MEMORY); in node_states_set_node()
653 node_set_state(node, N_HIGH_MEMORY); in node_states_set_node()
656 node_set_state(node, N_MEMORY); in node_states_set_node()
1481 static void node_states_clear_node(int node, struct memory_notify *arg) in node_states_clear_node() argument
1484 node_clear_state(node, N_NORMAL_MEMORY); in node_states_clear_node()
1487 node_clear_state(node, N_HIGH_MEMORY); in node_states_clear_node()
[all …]
cma_debug.c
19 struct hlist_node node; member
74 hlist_add_head(&mem->node, &cma->mem_head); in cma_add_to_cma_mem_list()
84 mem = hlist_entry(cma->mem_head.first, struct cma_mem, node); in cma_get_entry_from_list()
85 hlist_del_init(&mem->node); in cma_get_entry_from_list()
migrate.c
1585 struct list_head *pagelist, int node) in do_move_pages_to_node() argument
1589 .nid = node, in do_move_pages_to_node()
1610 int node, struct list_head *pagelist, bool migrate_all) in add_page_for_migration() argument
1636 if (page_to_nid(page) == node) in add_page_for_migration()
1675 static int move_pages_and_store_status(struct mm_struct *mm, int node, in move_pages_and_store_status() argument
1684 err = do_move_pages_to_node(mm, pagelist, node); in move_pages_and_store_status()
1698 return store_status(status, start, node, i - start); in move_pages_and_store_status()
1721 int node; in do_pages_move() local
1726 if (get_user(node, nodes + i)) in do_pages_move()
1731 if (node < 0 || node >= MAX_NUMNODES) in do_pages_move()
[all …]
vmstat.c
973 unsigned long sum_zone_node_page_state(int node, in sum_zone_node_page_state() argument
976 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
990 unsigned long sum_zone_numa_state(int node, in sum_zone_numa_state() argument
993 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_state()
1368 loff_t node = *pos; in frag_start() local
1371 pgdat && node; in frag_start()
1373 --node; in frag_start()
1985 int node; in init_cpu_node_state() local
1987 for_each_online_node(node) { in init_cpu_node_state()
1988 if (cpumask_weight(cpumask_of_node(node)) > 0) in init_cpu_node_state()
[all …]
mmu_notifier.c
104 struct interval_tree_node *node; in mn_itree_inv_start_range() local
109 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
111 if (node) { in mn_itree_inv_start_range()
113 res = container_of(node, struct mmu_interval_notifier, in mn_itree_inv_start_range()
126 struct interval_tree_node *node; in mn_itree_inv_next() local
128 node = interval_tree_iter_next(&interval_sub->interval_tree, in mn_itree_inv_next()
130 if (!node) in mn_itree_inv_next()
132 return container_of(node, struct mmu_interval_notifier, interval_tree); in mn_itree_inv_next()
page_alloc.c
5978 static int find_next_best_node(int node, nodemask_t *used_node_mask) in find_next_best_node() argument
5985 if (!node_isset(node, *used_node_mask)) { in find_next_best_node()
5986 node_set(node, *used_node_mask); in find_next_best_node()
5987 return node; in find_next_best_node()
5997 val = node_distance(node, n); in find_next_best_node()
6000 val += (n < node); in find_next_best_node()
6039 pg_data_t *node = NODE_DATA(node_order[i]); in build_zonelists_in_node_order() local
6041 nr_zones = build_zonerefs_node(node, zonerefs); in build_zonelists_in_node_order()
6073 int node, load, nr_nodes = 0; in build_zonelists() local
6083 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { in build_zonelists()
[all …]
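
find_next_best_node() picks, among the nodes not yet in used_node_mask, the one closest to the given node by node_distance(), which is how build_zonelists() orders fallback nodes. A rough userspace sketch with a made-up distance table and a plain bitmask; the real code also weighs CPU-less nodes and breaks ties differently:

#include <stdio.h>

#define MAX_NODES 4

/* Invented node_distance() table: nodes 0/1 are close, 2/3 are close. */
static const int distance[MAX_NODES][MAX_NODES] = {
    { 10, 20, 40, 40 },
    { 20, 10, 40, 40 },
    { 40, 40, 10, 20 },
    { 40, 40, 20, 10 },
};

/* Return the not-yet-used node closest to 'node', marking it used,
 * or -1 once every node has been handed out. */
static int find_next_best_node(int node, unsigned int *used_mask)
{
    int best = -1, best_val = 1 << 30;

    for (int n = 0; n < MAX_NODES; n++) {
        if (*used_mask & (1u << n))
            continue;
        if (distance[node][n] < best_val) {
            best_val = distance[node][n];
            best = n;
        }
    }
    if (best >= 0)
        *used_mask |= 1u << best;
    return best;
}

int main(void)
{
    unsigned int used = 0;
    int local_node = 2, n;

    /* Build a fallback order for node 2, nearest first, the way the
     * while loop in build_zonelists() does.  Prints 2, 3, 0, 1. */
    while ((n = find_next_best_node(local_node, &used)) >= 0)
        printf("fallback node: %d\n", n);
    return 0;
}
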
util.c
587 void *kvmalloc_node(size_t size, gfp_t flags, int node) in kvmalloc_node() argument
598 return kmalloc_node(size, flags, node); in kvmalloc_node()
618 ret = kmalloc_node(size, kmalloc_flags, node); in kvmalloc_node()
634 return __vmalloc_node(size, 1, flags, node, in kvmalloc_node()
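
kvmalloc_node() in util.c prefers kmalloc_node() and falls back to __vmalloc_node() when a physically contiguous allocation is not worth forcing. A loose Linux-userspace analogue of that fallback shape, with an invented size cutoff standing in for the kernel's gfp- and failure-based decision (malloc plays kmalloc, mmap plays vmalloc):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Loose analogue of kvmalloc_node(): try the "contiguous" allocator first,
 * fall back to a page-granular mapping.  The 128 KiB threshold is made up
 * for this demo; the kernel decides based on size, gfp flags and whether
 * kmalloc actually failed. */
static void *kvmalloc_sketch(size_t size)
{
    void *p = NULL;

    /* "kmalloc path": only attempted for modest sizes in this sketch. */
    if (size <= 128 * 1024)
        p = malloc(size);
    if (p)
        return p;

    /* "vmalloc path": virtually contiguous, physically scattered. */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
    void *small = kvmalloc_sketch(4096);       /* takes the malloc path */
    void *big   = kvmalloc_sketch(8u << 20);   /* takes the mmap path   */

    printf("small: %p\nbig:   %p\n", small, big);
    return 0;
}
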
kmemleak.c
122 struct hlist_node node; member
476 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) { in free_object_rcu()
477 hlist_del(&area->node); in free_object_rcu()
823 INIT_HLIST_NODE(&area->node); in add_scan_area()
827 hlist_add_head(&area->node, &object->area_list); in add_scan_area()
1360 hlist_for_each_entry(area, &object->area_list, node) in scan_object()
slab.h
627 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) in get_node() argument
629 return s->node[node]; in get_node()
memcontrol.c
3680 int node, cpu, i; in memcg_flush_percpu_vmstats() local
3690 for_each_node(node) { in memcg_flush_percpu_vmstats()
3691 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in memcg_flush_percpu_vmstats()
3702 for (pi = pn; pi; pi = parent_nodeinfo(pi, node)) in memcg_flush_percpu_vmstats()
5240 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_node_info() argument
5243 int tmp = node; in alloc_mem_cgroup_per_node_info()
5252 if (!node_state(node, N_NORMAL_MEMORY)) in alloc_mem_cgroup_per_node_info()
5278 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5282 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_node_info() argument
5284 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
[all …]
/mm/kasan/
init.c
80 static __init void *early_alloc(size_t size, int node) in early_alloc() argument
83 MEMBLOCK_ALLOC_ACCESSIBLE, node); in early_alloc()
87 __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS)); in early_alloc()
